Commit dee5d906 authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: remove ack_status() from struct x86_pmu

This function is Intel-only and is not needed on AMD CPUs.

[ Impact: simplify code ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-11-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b7f8859a
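
For quick reference, a condensed before/after sketch of the call path this commit simplifies, reconstructed from the hunks below (not verbatim kernel source):

	/* Before: a generic wrapper dispatched through the x86_pmu vtable,
	 * even though only Intel needs to ack overflow status. */
	static void hw_perf_ack_status(u64 ack)
	{
		if (unlikely(!perf_counters_initialized))
			return;

		x86_pmu->ack_status(ack);	/* amd_pmu_ack_status() was an empty stub */
	}

	/* After: the Intel NMI handler calls the Intel-only helper directly;
	 * the vtable member, the AMD stub and the wrapper are removed. */
	static inline void intel_pmu_ack_status(u64 ack)
	{
		wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
	}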
@@ -51,7 +51,6 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
-	void		(*ack_status)(u64);
 	void		(*enable)(int, u64);
 	void		(*disable)(int, u64);
 	unsigned	eventsel;
@@ -415,23 +414,11 @@ static inline u64 intel_pmu_get_status(u64 mask)
 	return status;
 }
 
-static void intel_pmu_ack_status(u64 ack)
+static inline void intel_pmu_ack_status(u64 ack)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static void amd_pmu_ack_status(u64 ack)
-{
-}
-
-static void hw_perf_ack_status(u64 ack)
-{
-	if (unlikely(!perf_counters_initialized))
-		return;
-
-	x86_pmu->ack_status(ack);
-}
-
 static void intel_pmu_enable_counter(int idx, u64 config)
 {
 	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
@@ -788,7 +775,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
-	hw_perf_ack_status(ack);
+	intel_pmu_ack_status(ack);
 
 	/*
 	 * Repeat if there is more work to be done:
@@ -904,7 +891,6 @@ static struct x86_pmu intel_pmu = {
 	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
-	.ack_status		= intel_pmu_ack_status,
 	.enable			= intel_pmu_enable_counter,
 	.disable		= intel_pmu_disable_counter,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
@@ -918,7 +904,6 @@ static struct x86_pmu amd_pmu = {
 	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
-	.ack_status		= amd_pmu_ack_status,
 	.enable			= amd_pmu_enable_counter,
 	.disable		= amd_pmu_disable_counter,
 	.eventsel		= MSR_K7_EVNTSEL0,