Commit 4158755d authored by Stephane Eranian, committed by Ingo Molnar

perf_events: Add perf_event_time()

Adds perf_event_time() to centralize access to event timing, and in
particular to ctx->time. Prepares for cgroup support.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d22059c.122ae30a.5e0e.ffff8b8b@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 5632ab12
@@ -268,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -281,7 +287,7 @@ static void update_event_times(struct perf_event *event)
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -290,7 +296,7 @@ static void update_event_times(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -546,6 +552,7 @@ event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -557,7 +564,7 @@ event_sched_out(struct perf_event *event,
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -568,7 +575,7 @@ event_sched_out(struct perf_event *event,
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -780,6 +787,8 @@ event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -796,9 +805,9 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -910,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
 			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -1054,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
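
The helper is trivial on its own; the value of the indirection is that every
caller which previously read ctx->time directly now goes through a single
function, so the cgroup work this prepares for can substitute a per-event time
source without touching the callers again. A minimal sketch of how such a hook
could look (is_cgroup_event() and perf_cgroup_event_time() are illustrative
names, not part of this commit):

	static u64 perf_event_time(struct perf_event *event)
	{
		struct perf_event_context *ctx = event->ctx;

		/* illustrative only: a cgroup-aware clock could be selected here */
		if (is_cgroup_event(event))
			return perf_cgroup_event_time(event);

		return ctx ? ctx->time : 0;
	}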