Commit 9dae0a3f authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A bunch of fixes for perf and kprobes:
   - revert a commit that caused a perf group regression
   - silence dmesg spam
   - fix kprobe probing errors on ia64 and ppc64
   - filter kprobe faults from userspace
   - lockdep fix for perf exit path
   - prevent perf #GP in KVM guest
   - correct the IvyBridge dTLB-load-misses event and the SNB-EP/IVT uncore Cbox filter mappings"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  kprobes: Fix "Failed to find blacklist" probing errors on ia64 and ppc64
  kprobes/x86: Don't try to resolve kprobe faults from userspace
  perf/x86/intel: Avoid spamming kernel log for BTS buffer failure
  perf/x86/intel: Protect LBR and extra_regs against KVM lying
  perf: Fix lockdep warning on process exit
  perf/x86/intel/uncore: Fix SNB-EP/IVT Cbox filter mappings
  perf/x86/intel: Use proper dTLB-load-misses event on IvyBridge
  perf: Revert ("perf: Always destroy groups on exit")
parents 43a255c2 d81b4253
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
             continue;
         if (event->attr.config1 & ~er->valid_mask)
             return -EINVAL;
+        /* Check if the extra msrs can be safely accessed */
+        if (!er->extra_msr_access)
+            return -ENXIO;
         reg->idx = er->idx;
         reg->config = event->attr.config1;
...
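The hunk above refuses to configure an event whose extra register turned out to be inaccessible, returning -ENXIO instead of letting a later MSR write #GP. The flag it tests is filled in at PMU init by check_msr() further down in this diff. A minimal user-space sketch of the same "gate the feature behind a validated-at-init flag" idea; the `reg_desc` struct and `config_extra_reg()` helper are invented for illustration and are not kernel structures:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a per-register descriptor. */
struct reg_desc {
    unsigned int msr;
    bool access_ok;    /* validated once at init, like extra_msr_access */
};

/* Refuse to configure an event whose extra register failed validation. */
static int config_extra_reg(const struct reg_desc *er)
{
    if (!er->access_ok)
        return -ENXIO;    /* same error code the patch returns */
    return 0;
}

int main(void)
{
    struct reg_desc ok  = { .msr = 0x1a6, .access_ok = true  };
    struct reg_desc bad = { .msr = 0x1a6, .access_ok = false };

    printf("ok:  %d\n", config_extra_reg(&ok));   /* 0 */
    printf("bad: %d\n", config_extra_reg(&bad));  /* -6 (ENXIO) */
    return 0;
}
```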
@@ -295,6 +295,7 @@ struct extra_reg {
     u64        config_mask;
     u64        valid_mask;
     int        idx;  /* per_xxx->regs[] reg index */
+    bool       extra_msr_access;
 };

 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {    \
@@ -303,6 +304,7 @@ struct extra_reg {
     .config_mask = (m),        \
     .valid_mask = (vm),        \
     .idx = EXTRA_REG_##i,      \
+    .extra_msr_access = true,  \
     }

 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)    \
...
@@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void)
     }
 }

+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
+ * This function tests whether the given MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+    u64 val_old, val_new, val_tmp;
+
+    /*
+     * Read the current value, change it and read it back to see if it
+     * matches, this is needed to detect certain hardware emulators
+     * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+     */
+    if (rdmsrl_safe(msr, &val_old))
+        return false;
+
+    /*
+     * Only change the bits which can be updated by wrmsrl.
+     */
+    val_tmp = val_old ^ mask;
+    if (wrmsrl_safe(msr, val_tmp) ||
+        rdmsrl_safe(msr, &val_new))
+        return false;
+
+    if (val_new != val_tmp)
+        return false;
+
+    /* Here it is certain that the MSR can be safely accessed.
+     * Restore the old value and return.
+     */
+    wrmsrl(msr, val_old);
+
+    return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
     x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void)
     union cpuid10_ebx ebx;
     struct event_constraint *c;
     unsigned int unused;
-    int version;
+    struct extra_reg *er;
+    int version, i;

     if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
         switch (boot_cpu_data.x86) {
@@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void)
     case 62: /* IvyBridge EP */
         memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                sizeof(hw_cache_event_ids));
+        /* dTLB-load-misses on IVB is different than SNB */
+        hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
         memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                sizeof(hw_cache_extra_regs));
@@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void)
         }
     }

+    /*
+     * Accessing LBR MSRs may cause a #GP under certain circumstances,
+     * e.g. KVM doesn't support LBR MSRs.
+     * Check all LBR MSRs here.
+     * Disable LBR access if any LBR MSR cannot be accessed.
+     */
+    if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+        x86_pmu.lbr_nr = 0;
+    for (i = 0; i < x86_pmu.lbr_nr; i++) {
+        if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+              check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+            x86_pmu.lbr_nr = 0;
+    }
+
+    /*
+     * Accessing extra MSRs may cause a #GP under certain circumstances,
+     * e.g. KVM doesn't support offcore response events.
+     * Check all extra_regs here.
+     */
+    if (x86_pmu.extra_regs) {
+        for (er = x86_pmu.extra_regs; er->msr; er++) {
+            er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+            /* Disable LBR select mapping */
+            if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+                x86_pmu.lbr_sel_map = NULL;
+        }
+    }

     /* Support full width counters using alternative MSR range */
     if (x86_pmu.intel_cap.full_width_write) {
         x86_pmu.max_period = x86_pmu.cntval_mask;
...
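check_msr() above probes a register by reading it, writing back a bit-flipped value and re-reading it, which catches hypervisors that silently accept the access but always return zeros; the old value is restored afterwards. Here is a rough user-space sketch of that flip-readback-restore pattern against a plain variable; `fake_rdmsr()`/`fake_wrmsr()` and the "stuck at zero" knob are invented for the illustration and are not kernel or KVM APIs:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in "register", plus a knob emulating a hypervisor that ignores
 * writes and always reads back 0 (the case check_msr() detects). */
static uint64_t fake_reg;
static bool emulated_stuck_at_zero;

static int fake_rdmsr(uint64_t *val)
{
    *val = emulated_stuck_at_zero ? 0 : fake_reg;
    return 0;
}

static int fake_wrmsr(uint64_t val)
{
    if (!emulated_stuck_at_zero)
        fake_reg = val;
    return 0;
}

/* Same shape as the patch: flip the bits under 'mask', verify the flip
 * sticks, then restore the original value. */
static bool check_reg(uint64_t mask)
{
    uint64_t old, tmp, now;

    if (fake_rdmsr(&old))
        return false;
    tmp = old ^ mask;
    if (fake_wrmsr(tmp) || fake_rdmsr(&now))
        return false;
    if (now != tmp)
        return false;        /* writes don't stick: treat as unusable */
    fake_wrmsr(old);         /* restore the original contents */
    return true;
}

int main(void)
{
    fake_reg = 0x12;
    printf("real register:     %s\n", check_reg(0x3) ? "usable" : "blocked");
    emulated_stuck_at_zero = true;
    printf("emulated register: %s\n", check_reg(0x3) ? "usable" : "blocked");
    return 0;
}
```

When the probe fails, the code above in the diff simply disables the feature (LBR, LBR select mapping, or the individual extra register) rather than risking a #GP at event time.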
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
     if (!x86_pmu.bts)
         return 0;

-    buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-    if (unlikely(!buffer))
+    buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+    if (unlikely(!buffer)) {
+        WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
         return -ENOMEM;
+    }

     max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
     thresh = max / 16;
...
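The alloc_bts_buffer() change trades the page allocator's full failure backtrace (suppressed by __GFP_NOWARN) for a single WARN_ONCE line, since per-CPU BTS buffer failures were spamming the kernel log. A hedged user-space sketch of the same "quiet allocation, warn only once" idea; `warn_once()` here is a hand-rolled stand-in, not a kernel macro:

```c
#include <stdio.h>
#include <stdlib.h>

/* Print a diagnostic the first time only, roughly like WARN_ONCE without
 * the backtrace; subsequent failures stay silent. */
#define warn_once(fmt, ...)                                 \
    do {                                                    \
        static int warned;                                  \
        if (!warned) {                                      \
            warned = 1;                                     \
            fprintf(stderr, fmt, ##__VA_ARGS__);            \
        }                                                   \
    } while (0)

static void *alloc_buffer(size_t size)
{
    void *buf = malloc(size);    /* quiet failure path, like __GFP_NOWARN */

    if (!buf)
        warn_once("%s: buffer allocation failure\n", __func__);
    return buf;
}

int main(void)
{
    void *b = alloc_buffer(1 << 16);

    free(b);
    return 0;
}
```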
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
     SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
     SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
     SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-    SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-    SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+    SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+    SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
     SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
     SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
     SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-    SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-    SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+    SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+    SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
     SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                               SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
     SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
     SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
     SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
     SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
     SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
     SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-    SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+    SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
     SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
     SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
...
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
     struct kprobe *p;
     struct kprobe_ctlblk *kcb;

+    if (user_mode_vm(regs))
+        return 0;
+
     addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
     /*
      * We don't want to be preempted for the entire
...
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
                        struct perf_event_context *child_ctx,
                        struct task_struct *child)
 {
-    perf_remove_from_context(child_event, true);
+    /*
+     * Do not destroy the 'original' grouping; because of the context
+     * switch optimization the original events could've ended up in a
+     * random child task.
+     *
+     * If we were to destroy the original group, all group related
+     * operations would cease to function properly after this random
+     * child dies.
+     *
+     * Do destroy all inherited groups, we don't care about those
+     * and being thorough is better.
+     */
+    perf_remove_from_context(child_event, !!child_event->parent);

     /*
      * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
     struct perf_event *child_event, *next;
-    struct perf_event_context *child_ctx;
+    struct perf_event_context *child_ctx, *parent_ctx;
     unsigned long flags;

     if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
     raw_spin_lock(&child_ctx->lock);
     task_ctx_sched_out(child_ctx);
     child->perf_event_ctxp[ctxn] = NULL;
+
+    /*
+     * In order to avoid freeing: child_ctx->parent_ctx->task
+     * under perf_event_context::lock, grab another reference.
+     */
+    parent_ctx = child_ctx->parent_ctx;
+    if (parent_ctx)
+        get_ctx(parent_ctx);
+
     /*
      * If this context is a clone; unclone it so it can't get
      * swapped to another process while we're removing all
@@ -7508,6 +7529,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
     update_context_time(child_ctx);
     raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

+    /*
+     * Now that we no longer hold perf_event_context::lock, drop
+     * our extra child_ctx->parent_ctx reference.
+     */
+    if (parent_ctx)
+        put_ctx(parent_ctx);
+
     /*
      * Report the task dead after unscheduling the events so that we
      * won't get any samples after PERF_RECORD_EXIT. We can however still
...
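The two hunks above pin child_ctx->parent_ctx with get_ctx() before taking perf_event_context::lock, so that the final put (which can end up freeing the parent context and its task) only happens after the lock is dropped; that is what clears the lockdep warning on process exit. A generic refcount sketch of the "take a reference, do the locked work, drop the reference outside the lock" pattern; the `ctx` object, its mutex and the helper names are illustrative, not perf's actual types:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative refcounted object; freeing it must never happen while the
 * caller still holds 'lock'. */
struct ctx {
    atomic_int refcount;
    pthread_mutex_t lock;
};

static void get_ctx(struct ctx *c)
{
    atomic_fetch_add(&c->refcount, 1);
}

static void put_ctx(struct ctx *c)
{
    /* atomic_fetch_sub() returns the old value: 1 means last reference. */
    if (atomic_fetch_sub(&c->refcount, 1) == 1) {
        pthread_mutex_destroy(&c->lock);
        free(c);                 /* safe: no lock held here */
    }
}

static void teardown(struct ctx *child, struct ctx *parent)
{
    /* Pin the parent before locking, mirroring the perf fix. */
    if (parent)
        get_ctx(parent);

    pthread_mutex_lock(&child->lock);
    /* ... detach events, clear the child->parent link, etc. ... */
    pthread_mutex_unlock(&child->lock);

    /* Only now may the parent's last reference go away. */
    if (parent)
        put_ctx(parent);
}

int main(void)
{
    struct ctx *parent = calloc(1, sizeof(*parent));
    struct ctx *child  = calloc(1, sizeof(*child));

    atomic_init(&parent->refcount, 1);
    atomic_init(&child->refcount, 1);
    pthread_mutex_init(&parent->lock, NULL);
    pthread_mutex_init(&child->lock, NULL);

    teardown(child, parent);
    put_ctx(parent);
    put_ctx(child);
    return 0;
}
```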
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 {
     unsigned long *iter;
     struct kprobe_blacklist_entry *ent;
-    unsigned long offset = 0, size = 0;
+    unsigned long entry, offset = 0, size = 0;

     for (iter = start; iter < end; iter++) {
-        if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
-            pr_err("Failed to find blacklist %p\n", (void *)*iter);
+        entry = arch_deref_entry_point((void *)*iter);
+
+        if (!kernel_text_address(entry) ||
+            !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+            pr_err("Failed to find blacklist at %p\n",
+                   (void *)entry);
             continue;
         }
         ent = kmalloc(sizeof(*ent), GFP_KERNEL);
         if (!ent)
             return -ENOMEM;
-        ent->start_addr = *iter;
-        ent->end_addr = *iter + size;
+        ent->start_addr = entry;
+        ent->end_addr = entry + size;
         INIT_LIST_HEAD(&ent->list);
         list_add_tail(&ent->list, &kprobe_blacklist);
     }
...
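populate_kprobe_blacklist() now runs each entry through arch_deref_entry_point() because on ia64 and ppc64 (ELFv1 ABI) a function symbol points at a function descriptor, not at the first instruction, so the raw address never resolved via kallsyms and produced the "Failed to find blacklist" errors. A small sketch of the descriptor-dereference idea; the `func_desc` layout shown is a ppc64-style illustration, not a portable or kernel-exported definition:

```c
#include <stdint.h>
#include <stdio.h>

/* ppc64 ELFv1-style function descriptor: the symbol points here, and the
 * real code address lives in the first word. */
struct func_desc {
    uintptr_t entry;    /* address of the first instruction */
    uintptr_t toc;      /* TOC pointer */
    uintptr_t env;
};

/* On descriptor-based ABIs, dereference the descriptor to get the text
 * address; on other ABIs this would just return 'addr' unchanged. */
static uintptr_t deref_entry_point(const void *addr, int uses_descriptors)
{
    if (uses_descriptors)
        return ((const struct func_desc *)addr)->entry;
    return (uintptr_t)addr;
}

int main(void)
{
    struct func_desc fd = { .entry = 0x123456, .toc = 0, .env = 0 };

    printf("descriptor at %p -> text 0x%lx\n",
           (void *)&fd, (unsigned long)deref_entry_point(&fd, 1));
    printf("plain symbol -> text 0x%lx\n",
           (unsigned long)deref_entry_point((void *)0x401000, 0));
    return 0;
}
```

The added kernel_text_address() check in the patch then makes sure the dereferenced address really is kernel code before it is added to the blacklist.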