Commit 3286be94 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A couple of small fixes to x86 perf drivers:

   - Measure L2 for HW_CACHE* events on AMD

   - Fix the address filter handling in the intel/pt driver

   - Handle the BTS disabling at the proper place"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/amd: Make HW_CACHE_REFERENCES and HW_CACHE_MISSES measure L2
  perf/x86/intel/pt: Do validate the size of a kernel address filter
  perf/x86/intel/pt: Fix kernel address filter's offset validation
  perf/x86/intel/pt: Fix an off-by-one in address filter configuration
  perf/x86/intel: Don't disable "intel_bts" around "intel" event batching
parents 6ffa36a5 080fe0b7
...@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = ...@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{ {
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076, [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0081, [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
......
...@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs ...@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
* disabled state if called consecutively. * disabled state if called consecutively.
* *
* During consecutive calls, the same disable value will be written to related * During consecutive calls, the same disable value will be written to related
* registers, so the PMU state remains unchanged. hw.state in * registers, so the PMU state remains unchanged.
* intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive *
* calls. * intel_bts events don't coexist with intel PMU's BTS events because of
* x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
* disabled around intel PMU's event batching etc, only inside the PMI handler.
*/ */
static void __intel_pmu_disable_all(void) static void __intel_pmu_disable_all(void)
{ {
...@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void) ...@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts(); intel_pmu_disable_bts();
else
intel_bts_disable_local();
intel_pmu_pebs_disable_all(); intel_pmu_pebs_disable_all();
} }
...@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi) ...@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
return; return;
intel_pmu_enable_bts(event->hw.config); intel_pmu_enable_bts(event->hw.config);
} else }
intel_bts_enable_local();
} }
static void intel_pmu_enable_all(int added) static void intel_pmu_enable_all(int added)
...@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) ...@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
*/ */
if (!x86_pmu.late_ack) if (!x86_pmu.late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_bts_disable_local();
__intel_pmu_disable_all(); __intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer(); handled = intel_pmu_drain_bts_buffer();
handled += intel_bts_interrupt(); handled += intel_bts_interrupt();
...@@ -2172,6 +2172,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) ...@@ -2172,6 +2172,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
/* Only restore PMU state when it's active. See x86_pmu_disable(). */ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
if (cpuc->enabled) if (cpuc->enabled)
__intel_pmu_enable_all(0, true); __intel_pmu_enable_all(0, true);
intel_bts_enable_local();
/* /*
* Only unmask the NMI after the overflow counters * Only unmask the NMI after the overflow counters
......
...@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event) ...@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
event->hw.addr_filters = NULL; event->hw.addr_filters = NULL;
} }
static inline bool valid_kernel_ip(unsigned long ip)
{
return virt_addr_valid(ip) && kernel_ip(ip);
}
static int pt_event_addr_filters_validate(struct list_head *filters) static int pt_event_addr_filters_validate(struct list_head *filters)
{ {
struct perf_addr_filter *filter; struct perf_addr_filter *filter;
...@@ -1081,12 +1086,17 @@ static int pt_event_addr_filters_validate(struct list_head *filters) ...@@ -1081,12 +1086,17 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
list_for_each_entry(filter, filters, entry) { list_for_each_entry(filter, filters, entry) {
/* PT doesn't support single address triggers */ /* PT doesn't support single address triggers */
if (!filter->range) if (!filter->range || !filter->size)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!filter->inode && !kernel_ip(filter->offset)) if (!filter->inode) {
if (!valid_kernel_ip(filter->offset))
return -EINVAL; return -EINVAL;
if (!valid_kernel_ip(filter->offset + filter->size))
return -EINVAL;
}
if (++range > pt_cap_get(PT_CAP_num_address_ranges)) if (++range > pt_cap_get(PT_CAP_num_address_ranges))
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event) ...@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
} else { } else {
/* apply the offset */ /* apply the offset */
msr_a = filter->offset + offs[range]; msr_a = filter->offset + offs[range];
msr_b = filter->size + msr_a; msr_b = filter->size + msr_a - 1;
} }
filters->filter[range].msr_a = msr_a; filters->filter[range].msr_a = msr_a;
......
...@@ -23,8 +23,8 @@ ...@@ -23,8 +23,8 @@
static struct kvm_event_hw_type_mapping amd_event_mapping[] = { static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES }, [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES }, [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES }, [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment