Kirill Smelkov / linux

Commit 936c663a, authored Mar 27, 2015 by Ingo Molnar
Merge branch 'perf/x86' into perf/core, because it's ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 072e5a1c 50f16a8b
Showing 11 changed files with 1514 additions and 63 deletions (+1514, -63)
arch/arm/kernel/hw_breakpoint.c              +1     -1
arch/arm64/kernel/hw_breakpoint.c            +1     -1
arch/x86/include/asm/cpufeature.h            +8     -1
arch/x86/include/asm/processor.h             +3     -0
arch/x86/kernel/cpu/Makefile                 +1     -1
arch/x86/kernel/cpu/common.c                 +39    -0
arch/x86/kernel/cpu/perf_event_intel_cqm.c   +1379  -0
include/linux/perf_event.h                   +48    -2
kernel/events/core.c                         +25    -48
kernel/events/hw_breakpoint.c                +4     -4
kernel/trace/trace_uprobe.c                  +5     -5
arch/arm/kernel/hw_breakpoint.c
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Per-cpu breakpoints are not supported by our stepping
 	 * mechanism.
 	 */
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		return -EINVAL;
 
 	/*
arch/arm64/kernel/hw_breakpoint.c
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Disallow per-task kernel breakpoints since these would
 	 * complicate the stepping code.
 	 */
-	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
 		return -EINVAL;
 
 	return 0;
arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
 #include <asm/disabled-features.h>
 #endif
 
-#define NCAPINTS	11	/* N 32-bit words worth of info */
+#define NCAPINTS	13	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -226,6 +226,7 @@
 #define X86_FEATURE_ERMS	( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 #define X86_FEATURE_INVPCID	( 9*32+10) /* Invalidate Processor Context ID */
 #define X86_FEATURE_RTM		( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM		( 9*32+12) /* Cache QoS Monitoring */
 #define X86_FEATURE_MPX		( 9*32+14) /* Memory Protection Extension */
 #define X86_FEATURE_AVX512F	( 9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
@@ -242,6 +243,12 @@
 #define X86_FEATURE_XGETBV1	(10*32+ 2) /* XGETBV with ECX = 1 */
 #define X86_FEATURE_XSAVES	(10*32+ 3) /* XSAVES/XRSTORS */
 
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC	(11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
 /*
  * BUG word(s)
  */
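Side note, not part of this commit: the new X86_FEATURE_CQM bit lives in capability word 9, which is filled from CPUID.(EAX=7,ECX=0):EBX, where bit 12 advertises Platform QoS Monitoring. A minimal user-space sketch for checking that bit with GCC's cpuid.h, purely for illustration:

/* check_cqm.c: gcc -O2 -o check_cqm check_cqm.c (x86 only) */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 7, sub-leaf 0: structured extended feature flags */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 7 not available");
		return 1;
	}
	printf("Cache QoS Monitoring (CQM): %s\n",
	       (ebx & (1u << 12)) ? "supported" : "not supported");
	return 0;
}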
arch/x86/include/asm/processor.h
@@ -109,6 +109,9 @@ struct cpuinfo_x86 {
 	/* in KB - valid for CPUS which support this call: */
 	int			x86_cache_size;
 	int			x86_cache_alignment;	/* In bytes */
+	/* Cache QoS architectural values: */
+	int			x86_cache_max_rmid;	/* max index */
+	int			x86_cache_occ_scale;	/* scale to bytes */
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
 	/* cpuid returned max cores value: */
arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
 endif
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_rapl.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_intel_rapl.o perf_event_intel_cqm.o
 
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE)	+= perf_event_intel_uncore.o \
 					   perf_event_intel_uncore_snb.o \
arch/x86/kernel/cpu/common.c
@@ -646,6 +646,30 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[10] = eax;
 	}
 
+	/* Additional Intel-defined flags: level 0x0000000F */
+	if (c->cpuid_level >= 0x0000000F) {
+		u32 eax, ebx, ecx, edx;
+
+		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
+		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[11] = edx;
+		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+			/* will be overridden if occupancy monitoring exists */
+			c->x86_cache_max_rmid = ebx;
+
+			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
+			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+			c->x86_capability[12] = edx;
+			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
+				c->x86_cache_max_rmid = ecx;
+				c->x86_cache_occ_scale = ebx;
+			}
+		} else {
+			c->x86_cache_max_rmid = -1;
+			c->x86_cache_occ_scale = -1;
+		}
+	}
+
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
@@ -834,6 +858,20 @@ static void generic_identify(struct cpuinfo_x86 *c)
 	detect_nopl(c);
 }
 
+static void x86_init_cache_qos(struct cpuinfo_x86 *c)
+{
+	/*
+	 * The heavy lifting of max_rmid and cache_occ_scale are handled
+	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
+	 * in case CQM bits really aren't there in this CPU.
+	 */
+	if (c != &boot_cpu_data) {
+		boot_cpu_data.x86_cache_max_rmid =
+			min(boot_cpu_data.x86_cache_max_rmid,
+			    c->x86_cache_max_rmid);
+	}
+}
+
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -923,6 +961,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	init_hypervisor(c);
 	x86_init_rdrand(c);
+	x86_init_cache_qos(c);
 
 	/*
 	 * Clear/Set all flags overriden by options, need do it
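The detection added above follows the CPUID leaf 0xF layout: sub-leaf 0 reports the maximum RMID across all resources in EBX and per-resource monitoring support in EDX (bit 1 is LLC monitoring), while sub-leaf 1 reports the occupancy-to-bytes scale factor in EBX and the LLC-specific max RMID in ECX. A user-space sketch (not from this commit) that reads the same values on a CPU that enumerates the leaf:

/* cqm_enum.c: gcc -O2 -o cqm_enum cqm_enum.c (x86 only) */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x0F, 0, &eax, &ebx, &ecx, &edx) ||
	    !(edx & (1u << 1))) {		/* EDX bit 1: LLC QoS monitoring */
		puts("CQM/LLC monitoring not enumerated");
		return 1;
	}
	printf("max RMID (all resources):  %u\n", ebx);

	/* sub-leaf 1: LLC monitoring capabilities */
	__get_cpuid_count(0x0F, 1, &eax, &ebx, &ecx, &edx);
	printf("LLC occupancy monitoring:  %s\n", (edx & 1u) ? "yes" : "no");
	printf("occupancy scale factor:    %u bytes per counter unit\n", ebx);
	printf("max RMID for LLC:          %u\n", ecx);
	return 0;
}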
arch/x86/kernel/cpu/perf_event_intel_cqm.c  (new file, 0 → 100644)
This diff is collapsed (+1379 lines).
include/linux/perf_event.h
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
 #include <linux/sysfs.h>
 #include <linux/perf_regs.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -118,10 +119,16 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 		struct { /* tracepoint */
-			struct task_struct	*tp_target;
 			/* for tp_event->class */
 			struct list_head	tp_list;
 		};
+		struct { /* intel_cqm */
+			int			cqm_state;
+			int			cqm_rmid;
+			struct list_head	cqm_events_entry;
+			struct list_head	cqm_groups_entry;
+			struct list_head	cqm_group_entry;
+		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 		struct { /* breakpoint */
 			/*
@@ -129,12 +136,12 @@ struct hw_perf_event {
 			 * problem hw_breakpoint has with context
 			 * creation and event initalization.
 			 */
-			struct task_struct		*bp_target;
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
 		};
 #endif
 	};
+	struct task_struct		*target;
 	int				state;
 	local64_t			prev_count;
 	u64				sample_period;
@@ -271,6 +278,11 @@ struct pmu {
 	 */
 	size_t				task_ctx_size;
 
+	/*
+	 * Return the count value for a counter.
+	 */
+	u64 (*count)			(struct perf_event *event); /*optional*/
 };
 
 /**
@@ -547,6 +559,35 @@ struct perf_output_handle {
 	int				page;
 };
 
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+	u64				time;
+	u64				timestamp;
+};
+
+struct perf_cgroup {
+	struct cgroup_subsys_state	css;
+	struct perf_cgroup_info	__percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+	return container_of(task_css(task, perf_event_cgrp_id),
+			    struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
+
 #ifdef CONFIG_PERF_EVENTS
 
 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
@@ -740,6 +781,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 	__perf_event_task_sched_out(prev, next);
 }
 
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+	return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
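The two additions here that matter for CQM are the optional pmu->count() callback and the shared hw.target field. A simplified stand-alone model (not kernel code; the toy_* names are illustrative) of how the read path can prefer a PMU-supplied value and fall back to the generic software count:

/* toy_count.c: gcc -O2 -o toy_count toy_count.c */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_event;

struct toy_pmu {
	/* optional: return the count straight from (pseudo) hardware */
	uint64_t (*count)(struct toy_event *event);
};

struct toy_event {
	struct toy_pmu *pmu;
	uint64_t count;		/* generic software-maintained counter */
	uint64_t child_count;
};

/* analogous to __perf_event_count(): generic software total */
static uint64_t toy_generic_count(struct toy_event *event)
{
	return event->count + event->child_count;
}

/* analogous to perf_event_count(): prefer the PMU callback if present */
static uint64_t toy_event_count(struct toy_event *event)
{
	if (event->pmu->count)
		return event->pmu->count(event);
	return toy_generic_count(event);
}

/* a PMU whose value lives outside the generic counter, like CQM occupancy */
static uint64_t toy_hw_count(struct toy_event *event)
{
	(void)event;
	return 4096;	/* pretend this was read from a hardware counter */
}

int main(void)
{
	struct toy_pmu cqm_like = { .count = toy_hw_count };
	struct toy_pmu plain    = { .count = NULL };
	struct toy_event a = { .pmu = &cqm_like, .count = 1, .child_count = 2 };
	struct toy_event b = { .pmu = &plain,    .count = 1, .child_count = 2 };

	printf("cqm-like event reads %llu, plain event reads %llu\n",
	       (unsigned long long)toy_event_count(&a),
	       (unsigned long long)toy_event_count(&b));
	return 0;
}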
kernel/events/core.c
@@ -34,11 +34,11 @@
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
+#include <linux/cgroup.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/mm_types.h>
-#include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/compat.h>
@@ -351,32 +351,6 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
 
 #ifdef CONFIG_CGROUP_PERF
 
-/*
- * perf_cgroup_info keeps track of time_enabled for a cgroup.
- * This is a per-cpu dynamically allocated data structure.
- */
-struct perf_cgroup_info {
-	u64				time;
-	u64				timestamp;
-};
-
-struct perf_cgroup {
-	struct cgroup_subsys_state	css;
-	struct perf_cgroup_info	__percpu *info;
-};
-
-/*
- * Must ensure cgroup is pinned (css_get) before calling
- * this function. In other words, we cannot call this function
- * if there is no cgroup event for the current CPU context.
- */
-static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
-{
-	return container_of(task_css(task, perf_event_cgrp_id),
-			    struct perf_cgroup, css);
-}
-
 static inline bool
 perf_cgroup_match(struct perf_event *event)
 {
@@ -3220,7 +3194,10 @@ static void __perf_event_read(void *info)
 
 static inline u64 perf_event_count(struct perf_event *event)
 {
-	return local64_read(&event->count) + atomic64_read(&event->child_count);
+	if (event->pmu->count)
+		return event->pmu->count(event);
+
+	return __perf_event_count(event);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -7149,7 +7126,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		 struct perf_event *group_leader,
 		 struct perf_event *parent_event,
 		 perf_overflow_handler_t overflow_handler,
-		 void *context)
+		 void *context, int cgroup_fd)
 {
 	struct pmu *pmu;
 	struct perf_event *event;
@@ -7204,16 +7181,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
-
-		if (attr->type == PERF_TYPE_TRACEPOINT)
-			event->hw.tp_target = task;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
 		/*
-		 * hw_breakpoint is a bit difficult here..
+		 * XXX pmu::event_init needs to know what task to account to
+		 * and we cannot use the ctx information because we need the
+		 * pmu before we get a ctx.
 		 */
-		else if (attr->type == PERF_TYPE_BREAKPOINT)
-			event->hw.bp_target = task;
-#endif
+		event->hw.target = task;
 	}
 
 	if (!overflow_handler && parent_event) {
@@ -7245,6 +7218,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (!has_branch_stack(event))
 		event->attr.branch_sample_type = 0;
 
+	if (cgroup_fd != -1) {
+		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
+		if (err)
+			goto err_ns;
+	}
+
 	pmu = perf_init_event(event);
 	if (!pmu)
 		goto err_ns;
@@ -7268,6 +7247,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		event->destroy(event);
 	module_put(pmu->module);
 err_ns:
+	if (is_cgroup_event(event))
+		perf_detach_cgroup(event);
 	if (event->ns)
 		put_pid_ns(event->ns);
 	kfree(event);
@@ -7486,6 +7467,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	int move_group = 0;
 	int err;
 	int f_flags = O_RDWR;
+	int cgroup_fd = -1;
 
 	/* for future expandability... */
 	if (flags & ~PERF_FLAG_ALL)
@@ -7551,21 +7533,16 @@ SYSCALL_DEFINE5(perf_event_open,
 	get_online_cpus();
 
+	if (flags & PERF_FLAG_PID_CGROUP)
+		cgroup_fd = pid;
+
 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
-				 NULL, NULL);
+				 NULL, NULL, cgroup_fd);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
 		goto err_cpus;
 	}
 
-	if (flags & PERF_FLAG_PID_CGROUP) {
-		err = perf_cgroup_connect(pid, event, &attr, group_leader);
-		if (err) {
-			__free_event(event);
-			goto err_cpus;
-		}
-	}
-
 	if (is_sampling_event(event)) {
 		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
 			err = -ENOTSUPP;
@@ -7802,7 +7779,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	 */
 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
-				 overflow_handler, context);
+				 overflow_handler, context, -1);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
 		goto err;
@@ -8163,7 +8140,7 @@ inherit_event(struct perf_event *parent_event,
 					   parent_event->cpu,
 					   child,
 					   group_leader, parent_event,
-					   NULL, NULL);
+					   NULL, NULL, -1);
 	if (IS_ERR(child_event))
 		return child_event;
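The cgroup_fd plumbing above is what PERF_FLAG_PID_CGROUP maps onto: with that flag, the pid argument of perf_event_open() is a file descriptor on a perf_event cgroup directory and the event counts, per CPU, only for tasks in that cgroup. A user-space sketch (not from this commit; the cgroup path is a placeholder) of that call pattern:

/* cgroup_cycles.c: gcc -O2 -o cgroup_cycles cgroup_cycles.c */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	int cgrp_fd, ev_fd;
	long long count;

	/* hypothetical cgroup; adjust to an existing perf_event cgroup */
	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	if (cgrp_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* cgroup events are per-CPU: pid = cgroup fd, cpu = 0 here */
	ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 0, -1,
			PERF_FLAG_PID_CGROUP);
	if (ev_fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(ev_fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("cycles in cgroup on CPU0: %lld\n", count);

	close(ev_fd);
	close(cgrp_fd);
	return 0;
}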
kernel/events/hw_breakpoint.c
@@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
-	struct task_struct *tsk = bp->hw.bp_target;
+	struct task_struct *tsk = bp->hw.target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk &&
+		if (iter->hw.target == tsk &&
 		    find_slot_idx(iter) == type &&
 		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
@@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 	int nr;
 
 	nr = info->cpu_pinned;
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		nr += max_task_bp_pinned(cpu, type);
 	else
 		nr += task_bp_pinned(cpu, bp, type);
@@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!bp->hw.bp_target) {
+	if (!bp->hw.target) {
 		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
 		return;
 	}
kernel/trace/trace_uprobe.c
@@ -1005,7 +1005,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 		return true;
 
 	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
-		if (event->hw.tp_target->mm == mm)
+		if (event->hw.target->mm == mm)
 			return true;
 	}
 
@@ -1015,7 +1015,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 static inline bool
 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
 {
-	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
 }
 
 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
@@ -1023,10 +1023,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 	bool done;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		list_del(&event->hw.tp_list);
 		done = tu->filter.nr_systemwide ||
-			(event->hw.tp_target->flags & PF_EXITING) ||
+			(event->hw.target->flags & PF_EXITING) ||
 			uprobe_filter_event(tu, event);
 	} else {
 		tu->filter.nr_systemwide--;
@@ -1046,7 +1046,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 	int err;
 
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target) {
+	if (event->hw.target) {
 		/*
 		 * event->parent != NULL means copy_process(), we can avoid
 		 * uprobe_apply(). current->mm must be probed and we can rely