Commit 9ed6060d authored by Peter Zijlstra, committed by Ingo Molnar

perf: Unindent labels

Fixup random annoying style bits.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b0a873eb
...@@ -147,7 +147,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) ...@@ -147,7 +147,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
struct perf_event_context *ctx; struct perf_event_context *ctx;
rcu_read_lock(); rcu_read_lock();
retry: retry:
ctx = rcu_dereference(task->perf_event_ctxp); ctx = rcu_dereference(task->perf_event_ctxp);
if (ctx) { if (ctx) {
/* /*
...@@ -619,7 +619,7 @@ void perf_event_disable(struct perf_event *event) ...@@ -619,7 +619,7 @@ void perf_event_disable(struct perf_event *event)
return; return;
} }
retry: retry:
task_oncpu_function_call(task, __perf_event_disable, event); task_oncpu_function_call(task, __perf_event_disable, event);
raw_spin_lock_irq(&ctx->lock); raw_spin_lock_irq(&ctx->lock);
...@@ -849,7 +849,7 @@ static void __perf_install_in_context(void *info) ...@@ -849,7 +849,7 @@ static void __perf_install_in_context(void *info)
if (!err && !ctx->task && cpuctx->max_pertask) if (!err && !ctx->task && cpuctx->max_pertask)
cpuctx->max_pertask--; cpuctx->max_pertask--;
unlock: unlock:
perf_enable(); perf_enable();
raw_spin_unlock(&ctx->lock); raw_spin_unlock(&ctx->lock);
...@@ -922,10 +922,12 @@ static void __perf_event_mark_enabled(struct perf_event *event, ...@@ -922,10 +922,12 @@ static void __perf_event_mark_enabled(struct perf_event *event,
event->state = PERF_EVENT_STATE_INACTIVE; event->state = PERF_EVENT_STATE_INACTIVE;
event->tstamp_enabled = ctx->time - event->total_time_enabled; event->tstamp_enabled = ctx->time - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE) if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
sub->tstamp_enabled = sub->tstamp_enabled =
ctx->time - sub->total_time_enabled; ctx->time - sub->total_time_enabled;
}
}
} }
/* /*
...@@ -991,7 +993,7 @@ static void __perf_event_enable(void *info) ...@@ -991,7 +993,7 @@ static void __perf_event_enable(void *info)
} }
} }
unlock: unlock:
raw_spin_unlock(&ctx->lock); raw_spin_unlock(&ctx->lock);
} }
...@@ -1032,7 +1034,7 @@ void perf_event_enable(struct perf_event *event) ...@@ -1032,7 +1034,7 @@ void perf_event_enable(struct perf_event *event)
if (event->state == PERF_EVENT_STATE_ERROR) if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF; event->state = PERF_EVENT_STATE_OFF;
retry: retry:
raw_spin_unlock_irq(&ctx->lock); raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event); task_oncpu_function_call(task, __perf_event_enable, event);
...@@ -1052,7 +1054,7 @@ void perf_event_enable(struct perf_event *event) ...@@ -1052,7 +1054,7 @@ void perf_event_enable(struct perf_event *event)
if (event->state == PERF_EVENT_STATE_OFF) if (event->state == PERF_EVENT_STATE_OFF)
__perf_event_mark_enabled(event, ctx); __perf_event_mark_enabled(event, ctx);
out: out:
raw_spin_unlock_irq(&ctx->lock); raw_spin_unlock_irq(&ctx->lock);
} }
...@@ -1092,17 +1094,19 @@ static void ctx_sched_out(struct perf_event_context *ctx, ...@@ -1092,17 +1094,19 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (!ctx->nr_active) if (!ctx->nr_active)
goto out_enable; goto out_enable;
if (event_type & EVENT_PINNED) if (event_type & EVENT_PINNED) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry) list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx); group_sched_out(event, cpuctx, ctx);
}
if (event_type & EVENT_FLEXIBLE) if (event_type & EVENT_FLEXIBLE) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry) list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx); group_sched_out(event, cpuctx, ctx);
}
out_enable: out_enable:
perf_enable(); perf_enable();
out: out:
raw_spin_unlock(&ctx->lock); raw_spin_unlock(&ctx->lock);
} }
...@@ -1341,9 +1345,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx, ...@@ -1341,9 +1345,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
if (event->cpu != -1 && event->cpu != smp_processor_id()) if (event->cpu != -1 && event->cpu != smp_processor_id())
continue; continue;
if (group_can_go_on(event, cpuctx, can_add_hw)) if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx)) if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0; can_add_hw = 0;
}
} }
} }
...@@ -1373,7 +1378,7 @@ ctx_sched_in(struct perf_event_context *ctx, ...@@ -1373,7 +1378,7 @@ ctx_sched_in(struct perf_event_context *ctx,
ctx_flexible_sched_in(ctx, cpuctx); ctx_flexible_sched_in(ctx, cpuctx);
perf_enable(); perf_enable();
out: out:
raw_spin_unlock(&ctx->lock); raw_spin_unlock(&ctx->lock);
} }
...@@ -1714,7 +1719,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) ...@@ -1714,7 +1719,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
raw_spin_unlock(&ctx->lock); raw_spin_unlock(&ctx->lock);
perf_event_task_sched_in(task); perf_event_task_sched_in(task);
out: out:
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -2053,7 +2058,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) ...@@ -2053,7 +2058,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
if (!ptrace_may_access(task, PTRACE_MODE_READ)) if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto errout; goto errout;
retry: retry:
ctx = perf_lock_task_context(task, &flags); ctx = perf_lock_task_context(task, &flags);
if (ctx) { if (ctx) {
unclone_ctx(ctx); unclone_ctx(ctx);
...@@ -2081,7 +2086,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) ...@@ -2081,7 +2086,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
put_task_struct(task); put_task_struct(task);
return ctx; return ctx;
errout: errout:
put_task_struct(task); put_task_struct(task);
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -3264,7 +3269,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle) ...@@ -3264,7 +3269,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
if (handle->wakeup != local_read(&buffer->wakeup)) if (handle->wakeup != local_read(&buffer->wakeup))
perf_output_wakeup(handle); perf_output_wakeup(handle);
out: out:
preempt_enable(); preempt_enable();
} }
...@@ -4562,7 +4567,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) ...@@ -4562,7 +4567,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
rcu_assign_pointer(cpuctx->swevent_hlist, hlist); rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
} }
cpuctx->hlist_refcount++; cpuctx->hlist_refcount++;
exit: exit:
mutex_unlock(&cpuctx->hlist_mutex); mutex_unlock(&cpuctx->hlist_mutex);
return err; return err;
...@@ -4587,7 +4592,7 @@ static int swevent_hlist_get(struct perf_event *event) ...@@ -4587,7 +4592,7 @@ static int swevent_hlist_get(struct perf_event *event)
put_online_cpus(); put_online_cpus();
return 0; return 0;
fail: fail:
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu == failed_cpu) if (cpu == failed_cpu)
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment