Commit df1a132b authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Introduce struct for sample data

For easy extension of the sample data, put it in a structure.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ea1900e5
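
The change is mechanical: the (regs, addr) argument pair threaded through the overflow path is replaced by a pointer to the new struct, so adding a sample field later means adding a struct member instead of another parameter at every call site. As a minimal sketch of the calling pattern introduced below (assembled from the diff; counter, nmi and regs stand for whatever the calling context provides):

	struct perf_sample_data data = {
		.regs = regs,	/* register state at sample time */
		.addr = 0,	/* data address, filled in if PERF_SAMPLE_ADDR */
	};

	if (perf_counter_overflow(counter, nmi, &data))
		/* overflow path asked us to throttle the counter */
		;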
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -1001,7 +1001,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		addr = 0;
+		struct perf_sample_data data = {
+			.regs = regs,
+			.addr = 0,
+		};
+
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
 			/*
 			 * The user wants a data address recorded.
@@ -1016,9 +1020,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
 				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
 			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				addr = mfspr(SPRN_SDAR);
+				data.addr = mfspr(SPRN_SDAR);
 		}
-		if (perf_counter_overflow(counter, nmi, regs, addr)) {
+		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
 			 * by setting the counter to 0, so it will be
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1173,11 +1173,14 @@ static void intel_pmu_reset(void)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	struct cpu_hw_counters;
 	int bit, cpu, loops;
 	u64 ack, status;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
@@ -1210,7 +1213,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 		if (!intel_pmu_save_and_restart(counter))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
@@ -1230,12 +1233,16 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-	int cpu, idx, handled = 0;
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
+	int cpu, idx, handled = 0;
 	u64 val;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
@@ -1256,7 +1263,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			amd_pmu_disable_counter(hwc, idx);
 	}
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -605,8 +605,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 	       struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern int perf_counter_overflow(struct perf_counter *counter,
-				 int nmi, struct pt_regs *regs, u64 addr);
+struct perf_sample_data {
+	struct pt_regs	*regs;
+	u64		addr;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+				 struct perf_sample_data *data);
+
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2378,8 +2378,8 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter,
-				int nmi, struct pt_regs *regs, u64 addr)
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+				struct perf_sample_data *data)
 {
 	int ret;
 	u64 sample_type = counter->attr.sample_type;
@@ -2404,10 +2404,10 @@ static void perf_counter_output(struct perf_counter *counter,
 	header.size = sizeof(header);
 
 	header.misc = PERF_EVENT_MISC_OVERFLOW;
-	header.misc |= perf_misc_flags(regs);
+	header.misc |= perf_misc_flags(data->regs);
 
 	if (sample_type & PERF_SAMPLE_IP) {
-		ip = perf_instruction_pointer(regs);
+		ip = perf_instruction_pointer(data->regs);
 		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
@@ -2460,7 +2460,7 @@ static void perf_counter_output(struct perf_counter *counter,
 	}
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-		callchain = perf_callchain(regs);
+		callchain = perf_callchain(data->regs);
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
@@ -2486,7 +2486,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		perf_output_put(&handle, time);
 
 	if (sample_type & PERF_SAMPLE_ADDR)
-		perf_output_put(&handle, addr);
+		perf_output_put(&handle, data->addr);
 
 	if (sample_type & PERF_SAMPLE_ID)
 		perf_output_put(&handle, counter->id);
@@ -2950,8 +2950,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 /*
  * Generic counter overflow handling.
  */
-int perf_counter_overflow(struct perf_counter *counter,
-			  int nmi, struct pt_regs *regs, u64 addr)
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+			  struct perf_sample_data *data)
 {
 	int events = atomic_read(&counter->event_limit);
 	int throttle = counter->pmu->unthrottle != NULL;
@@ -3005,7 +3005,7 @@ int perf_counter_overflow(struct perf_counter *counter,
 		perf_counter_disable(counter);
 	}
 
-	perf_counter_output(counter, nmi, regs, addr);
+	perf_counter_output(counter, nmi, data);
 	return ret;
 }
@@ -3054,24 +3054,25 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
 	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
 	struct perf_counter *counter;
-	struct pt_regs *regs;
 	u64 period;
 
 	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
 	counter->pmu->read(counter);
 
-	regs = get_irq_regs();
+	data.addr = 0;
+	data.regs = get_irq_regs();
+
 	/*
 	 * In case we exclude kernel IPs or are somehow not in interrupt
 	 * context, provide the next best thing, the user IP.
 	 */
-	if ((counter->attr.exclude_kernel || !regs) &&
+	if ((counter->attr.exclude_kernel || !data.regs) &&
 			!counter->attr.exclude_user)
-		regs = task_pt_regs(current);
+		data.regs = task_pt_regs(current);
 
-	if (regs) {
-		if (perf_counter_overflow(counter, 0, regs, 0))
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
 			ret = HRTIMER_NORESTART;
 	}
@@ -3084,9 +3085,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs, u64 addr)
 {
+	struct perf_sample_data data = {
+		.regs = regs,
+		.addr = addr,
+	};
+
 	perf_swcounter_update(counter);
 	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, regs, addr))
+	if (perf_counter_overflow(counter, nmi, &data))
 		/* soft-disable the counter */
 		;