Commit 453e4108 authored by Vincent Donnefort, committed by Ingo Molnar

cpu/hotplug: Add cpuhp_invoke_callback_range()

Factorize and unify the cpuhp callback range invocations, especially for
the hotunplug path, where two different ways of decrementing were used. The
first one decrements before the callback is called:

 cpuhp_thread_fun()
     state = st->state;
     st->state--;
     cpuhp_invoke_callback(state);

The second one decrements after the callback:

 take_down_cpu()|cpuhp_down_callbacks()
     cpuhp_invoke_callback(st->state);
     st->state--;

This is problematic for rolling back the steps in case of error, as
depending on the decrement order, the rollback will start from N or N-1. It
also makes tracing inconsistent between the steps run in the cpuhp thread
and the others.
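
As a purely illustrative sketch (a standalone toy program, not kernel code;
the five steps and the failing step N = 3 are made up), the two orders leave
the state counter in different places when a callback fails, which is why the
rollback starting point differs by one:

 #include <stdio.h>

 /* Pretend the callback for step 3 fails. */
 static int callback(int state)
 {
     return state == 3 ? -1 : 0;
 }

 int main(void)
 {
     int state, target = 0;

     /* Decrement before invoking, as in cpuhp_thread_fun(). */
     for (state = 5; state > target;) {
         int s = state--;

         if (callback(s))
             break;
     }
     printf("rollback starts from %d\n", state); /* prints 2, i.e. N - 1 */

     /* Decrement after invoking, as in take_down_cpu()/cpuhp_down_callbacks(). */
     for (state = 5; state > target; state--) {
         if (callback(state))
             break;
     }
     printf("rollback starts from %d\n", state); /* prints 3, i.e. N */

     return 0;
 }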

Additionally, avoid useless cpuhp_thread_fun() loops by skipping empty
steps.
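
The skipping is done by the cpuhp_next_state() helper added in the diff
below. As a simplified, standalone model of that loop (the step table, types
and function names here are invented for the example; the kernel version
operates on struct cpuhp_step and the per-CPU hotplug state):

 #include <stdbool.h>

 typedef int (*step_cb)(unsigned int cpu);

 struct step {
     step_cb startup;
     step_cb teardown;
 };

 /* Hypothetical step table, standing in for cpuhp_hp_states. */
 static struct step steps[16];

 static bool step_empty(bool bringup, int state)
 {
     return bringup ? !steps[state].startup : !steps[state].teardown;
 }

 /*
  * Walk from *state towards target, handing back only states that have a
  * callback registered. Mirrors the shape of cpuhp_next_state(): bringup
  * consumes the state before running it, teardown after.
  */
 static bool next_state(bool bringup, int *state_to_run, int *state, int target)
 {
     do {
         if (bringup) {
             if (*state >= target)
                 return false;
             *state_to_run = ++(*state);
         } else {
             if (*state <= target)
                 return false;
             *state_to_run = (*state)--;
         }
     } while (step_empty(bringup, *state_to_run));

     return true;
 }

 /* Invoke every non-empty callback between *state and target, stop on error. */
 static int invoke_range(bool bringup, unsigned int cpu, int *state, int target)
 {
     int run, err = 0;

     while (next_state(bringup, &run, state, target)) {
         step_cb cb = bringup ? steps[run].startup : steps[run].teardown;

         err = cb(cpu);
         if (err)
             break;
     }
     return err;
 }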
Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20210216103506.416286-4-vincent.donnefort@arm.com
parent 62f25069
@@ -135,6 +135,11 @@ static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 	return cpuhp_hp_states + state;
 }
 
+static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
+{
+	return bringup ? !step->startup.single : !step->teardown.single;
+}
+
 /**
  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
  * @cpu:	The cpu for which the callback should be invoked
@@ -157,26 +162,24 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 
 	if (st->fail == state) {
 		st->fail = CPUHP_INVALID;
-
-		if (!(bringup ? step->startup.single : step->teardown.single))
-			return 0;
-
 		return -EAGAIN;
 	}
 
+	if (cpuhp_step_empty(bringup, step)) {
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
 	if (!step->multi_instance) {
 		WARN_ON_ONCE(lastp && *lastp);
 		cb = bringup ? step->startup.single : step->teardown.single;
-		if (!cb)
-			return 0;
+
 		trace_cpuhp_enter(cpu, st->target, state, cb);
 		ret = cb(cpu);
 		trace_cpuhp_exit(cpu, st->state, state, ret);
 		return ret;
 	}
 	cbm = bringup ? step->startup.multi : step->teardown.multi;
-	if (!cbm)
-		return 0;
 
 	/* Single invocation for instance add/remove */
 	if (node) {
@@ -475,6 +478,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 static inline void
 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
 {
+	st->target = prev_state;
+
+	/*
+	 * Already rolling back. No need invert the bringup value or to change
+	 * the current state.
+	 */
+	if (st->rollback)
+		return;
+
 	st->rollback = true;
 
 	/*
@@ -488,7 +500,6 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
 		st->state++;
 	}
 
-	st->target = prev_state;
 	st->bringup = !st->bringup;
 }
 
@@ -591,10 +602,53 @@ static int finish_cpu(unsigned int cpu)
  * Hotplug state machine related functions
  */
 
-static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
+/*
+ * Get the next state to run. Empty ones will be skipped. Returns true if a
+ * state must be run.
+ *
+ * st->state will be modified ahead of time, to match state_to_run, as if it
+ * has already ran.
+ */
+static bool cpuhp_next_state(bool bringup,
+			     enum cpuhp_state *state_to_run,
+			     struct cpuhp_cpu_state *st,
+			     enum cpuhp_state target)
 {
-	for (st->state--; st->state > st->target; st->state--)
-		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
+	do {
+		if (bringup) {
+			if (st->state >= target)
+				return false;
+
+			*state_to_run = ++st->state;
+		} else {
+			if (st->state <= target)
+				return false;
+
+			*state_to_run = st->state--;
+		}
+
+		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
+			break;
+	} while (true);
+
+	return true;
+}
+
+static int cpuhp_invoke_callback_range(bool bringup,
+				       unsigned int cpu,
+				       struct cpuhp_cpu_state *st,
+				       enum cpuhp_state target)
+{
+	enum cpuhp_state state;
+	int err = 0;
+
+	while (cpuhp_next_state(bringup, &state, st, target)) {
+		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
+		if (err)
+			break;
+	}
+
+	return err;
 }
 
 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
@@ -617,16 +671,12 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 	enum cpuhp_state prev_state = st->state;
 	int ret = 0;
 
-	while (st->state < target) {
-		st->state++;
-		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-		if (ret) {
-			if (can_rollback_cpu(st)) {
-				st->target = prev_state;
-				undo_cpu_up(cpu, st);
-			}
-			break;
-		}
+	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
+	if (ret) {
+		cpuhp_reset_state(st, prev_state);
+		if (can_rollback_cpu(st))
+			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
+							    prev_state));
 	}
 	return ret;
 }
@@ -690,17 +740,9 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		state = st->cb_state;
 		st->should_run = false;
 	} else {
-		if (bringup) {
-			st->state++;
-			state = st->state;
-			st->should_run = (st->state < st->target);
-			WARN_ON_ONCE(st->state > st->target);
-		} else {
-			state = st->state;
-			st->state--;
-			st->should_run = (st->state > st->target);
-			WARN_ON_ONCE(st->state < st->target);
-		}
+		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
+		if (!st->should_run)
+			goto end;
 	}
 
 	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
@@ -728,6 +770,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		st->should_run = false;
 	}
 
+end:
 	cpuhp_lock_release(bringup);
 	lockdep_release_cpus_lock();
 
@@ -881,19 +924,18 @@ static int take_cpu_down(void *_param)
 		return err;
 
 	/*
-	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
-	 * do this step again.
+	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
+	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
 	 */
-	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
-	st->state--;
+	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
+
 	/* Invoke the former CPU_DYING callbacks */
-	for (; st->state > target; st->state--) {
-		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
-		/*
-		 * DYING must not fail!
-		 */
-		WARN_ON_ONCE(ret);
-	}
+	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
+
+	/*
+	 * DYING must not fail!
+	 */
+	WARN_ON_ONCE(ret);
 
 	/* Give up timekeeping duties */
 	tick_handover_do_timer();
@@ -975,27 +1017,22 @@ void cpuhp_report_idle_dead(void)
 			 cpuhp_complete_idle_dead, st, 0);
 }
 
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-	for (st->state++; st->state < st->target; st->state++)
-		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-}
-
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 				enum cpuhp_state target)
 {
 	enum cpuhp_state prev_state = st->state;
 	int ret = 0;
 
-	for (; st->state > target; st->state--) {
-		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
-		if (ret) {
-			st->target = prev_state;
-			if (st->state < prev_state)
-				undo_cpu_down(cpu, st);
-			break;
-		}
-	}
+	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
+	if (ret) {
+		cpuhp_reset_state(st, prev_state);
+
+		if (st->state < prev_state)
+			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
+							    prev_state));
+	}
 
 	return ret;
 }
@@ -1168,14 +1205,12 @@ void notify_cpu_starting(unsigned int cpu)
 	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
 	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
-	while (st->state < target) {
-		st->state++;
-		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-		/*
-		 * STARTING must not fail!
-		 */
-		WARN_ON_ONCE(ret);
-	}
+
+	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
+
+	/*
+	 * STARTING must not fail!
+	 */
+	WARN_ON_ONCE(ret);
 }
 
 /*
@@ -1781,8 +1816,7 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
 	 * If there's nothing to do, we done.
 	 * Relies on the union for multi_instance.
 	 */
-	if ((bringup && !sp->startup.single) ||
-	    (!bringup && !sp->teardown.single))
+	if (cpuhp_step_empty(bringup, sp))
 		return 0;
 	/*
 	 * The non AP bound callbacks can fail on bringup. On teardown