Commit 329c0e01 authored by Frederic Weisbecker

perf: Better order flexible and pinned scheduling

When a task gets scheduled in, we don't touch the cpu bound events,
so the priority order becomes:

	cpu pinned, cpu flexible, task pinned, task flexible.

So schedule out cpu flexibles when a new task context gets in
and correctly order the groups to schedule in:

	task pinned, cpu flexible, task flexible.

Cpu pinned groups don't need to be touched at this time.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
parent 7defb0f8
...@@ -1362,6 +1362,14 @@ ctx_sched_in(struct perf_event_context *ctx, ...@@ -1362,6 +1362,14 @@ ctx_sched_in(struct perf_event_context *ctx,
raw_spin_unlock(&ctx->lock); raw_spin_unlock(&ctx->lock);
} }
/*
 * Schedule in the events of the per-CPU (cpu-bound) context on this CPU.
 * @event_type selects which groups to bring in (EVENT_PINNED and/or
 * EVENT_FLEXIBLE); the actual work is delegated to ctx_sched_in().
 * Counterpart of cpu_ctx_sched_out().
 */
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	/* The cpu-bound context is embedded in the per-CPU cpuctx. */
	struct perf_event_context *ctx = &cpuctx->ctx;
	ctx_sched_in(ctx, cpuctx, event_type);
}
static void task_ctx_sched_in(struct task_struct *task, static void task_ctx_sched_in(struct task_struct *task,
enum event_type_t event_type) enum event_type_t event_type)
{ {
...@@ -1388,15 +1396,27 @@ static void task_ctx_sched_in(struct task_struct *task, ...@@ -1388,15 +1396,27 @@ static void task_ctx_sched_in(struct task_struct *task,
*/ */
void perf_event_task_sched_in(struct task_struct *task) void perf_event_task_sched_in(struct task_struct *task)
{ {
task_ctx_sched_in(task, EVENT_ALL); struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
} struct perf_event_context *ctx = task->perf_event_ctxp;
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, if (likely(!ctx))
enum event_type_t event_type) return;
{
struct perf_event_context *ctx = &cpuctx->ctx;
ctx_sched_in(ctx, cpuctx, event_type); if (cpuctx->task_ctx == ctx)
return;
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
* cpu flexible, task flexible.
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
cpuctx->task_ctx = ctx;
} }
#define MAX_INTERRUPTS (~0ULL) #define MAX_INTERRUPTS (~0ULL)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment