Commit af1f4045 authored by Thomas Gleixner

cpu/hotplug: Hand in target state to _cpu_up/down

We want to be able to bringup/teardown the cpu to a particular state. Add a
target argument to _cpu_up/down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182340.862113133@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 4baa0afc
...@@ -547,7 +547,8 @@ static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) ...@@ -547,7 +547,8 @@ static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
} }
/* Requires cpu_add_remove_lock to be held */ /* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
enum cpuhp_state target)
{ {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0; int prev_state, ret = 0;
...@@ -564,7 +565,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) ...@@ -564,7 +565,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
cpuhp_tasks_frozen = tasks_frozen; cpuhp_tasks_frozen = tasks_frozen;
prev_state = st->state; prev_state = st->state;
st->target = CPUHP_OFFLINE; st->target = target;
for (; st->state > st->target; st->state--) { for (; st->state > st->target; st->state--) {
struct cpuhp_step *step = cpuhp_bp_states + st->state; struct cpuhp_step *step = cpuhp_bp_states + st->state;
...@@ -584,7 +585,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) ...@@ -584,7 +585,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
return ret; return ret;
} }
int cpu_down(unsigned int cpu) static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{ {
int err; int err;
...@@ -595,12 +596,16 @@ int cpu_down(unsigned int cpu) ...@@ -595,12 +596,16 @@ int cpu_down(unsigned int cpu)
goto out; goto out;
} }
err = _cpu_down(cpu, 0); err = _cpu_down(cpu, 0, target);
out: out:
cpu_maps_update_done(); cpu_maps_update_done();
return err; return err;
} }
int cpu_down(unsigned int cpu)
{
return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down); EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/ #endif /*CONFIG_HOTPLUG_CPU*/
...@@ -669,7 +674,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) ...@@ -669,7 +674,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
} }
/* Requires cpu_add_remove_lock to be held */ /* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen) static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{ {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct task_struct *idle; struct task_struct *idle;
...@@ -692,7 +697,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen) ...@@ -692,7 +697,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
cpuhp_tasks_frozen = tasks_frozen; cpuhp_tasks_frozen = tasks_frozen;
prev_state = st->state; prev_state = st->state;
st->target = CPUHP_ONLINE; st->target = target;
while (st->state < st->target) { while (st->state < st->target) {
struct cpuhp_step *step; struct cpuhp_step *step;
...@@ -710,7 +715,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen) ...@@ -710,7 +715,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
return ret; return ret;
} }
int cpu_up(unsigned int cpu) static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{ {
int err = 0; int err = 0;
...@@ -734,12 +739,16 @@ int cpu_up(unsigned int cpu) ...@@ -734,12 +739,16 @@ int cpu_up(unsigned int cpu)
goto out; goto out;
} }
err = _cpu_up(cpu, 0); err = _cpu_up(cpu, 0, target);
out: out:
cpu_maps_update_done(); cpu_maps_update_done();
return err; return err;
} }
int cpu_up(unsigned int cpu)
{
return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up); EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP #ifdef CONFIG_PM_SLEEP_SMP
...@@ -762,7 +771,7 @@ int disable_nonboot_cpus(void) ...@@ -762,7 +771,7 @@ int disable_nonboot_cpus(void)
if (cpu == first_cpu) if (cpu == first_cpu)
continue; continue;
trace_suspend_resume(TPS("CPU_OFF"), cpu, true); trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
error = _cpu_down(cpu, 1); error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
trace_suspend_resume(TPS("CPU_OFF"), cpu, false); trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
if (!error) if (!error)
cpumask_set_cpu(cpu, frozen_cpus); cpumask_set_cpu(cpu, frozen_cpus);
...@@ -812,7 +821,7 @@ void enable_nonboot_cpus(void) ...@@ -812,7 +821,7 @@ void enable_nonboot_cpus(void)
for_each_cpu(cpu, frozen_cpus) { for_each_cpu(cpu, frozen_cpus) {
trace_suspend_resume(TPS("CPU_ON"), cpu, true); trace_suspend_resume(TPS("CPU_ON"), cpu, true);
error = _cpu_up(cpu, 1); error = _cpu_up(cpu, 1, CPUHP_ONLINE);
trace_suspend_resume(TPS("CPU_ON"), cpu, false); trace_suspend_resume(TPS("CPU_ON"), cpu, false);
if (!error) { if (!error) {
pr_info("CPU%d is up\n", cpu); pr_info("CPU%d is up\n", cpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment