Commit 8536e02e authored by Vijay Kumar, committed by David S. Miller

sparc64: Use CPU_POKE to resume idle cpu

Use CPU_POKE hypervisor call to resume idle cpu if supported.
Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 28d43de7
...@@ -33,6 +33,9 @@ ...@@ -33,6 +33,9 @@
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
extern cpumask_t cpu_core_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS];
void smp_init_cpu_poke(void);
void scheduler_poke(void);
void arch_send_call_function_single_ipi(int cpu); void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask); void arch_send_call_function_ipi_mask(const struct cpumask *mask);
...@@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu); ...@@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu);
#define smp_fetch_global_regs() do { } while (0) #define smp_fetch_global_regs() do { } while (0)
#define smp_fetch_global_pmu() do { } while (0) #define smp_fetch_global_pmu() do { } while (0)
#define smp_fill_in_cpu_possible_map() do { } while (0) #define smp_fill_in_cpu_possible_map() do { } while (0)
#define smp_init_cpu_poke() do { } while (0)
#define scheduler_poke() do { } while (0)
#endif /* !(CONFIG_SMP) */ #endif /* !(CONFIG_SMP) */
......
...@@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void) ...@@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void)
group = HV_GRP_CORE; group = HV_GRP_CORE;
major = 1; major = 1;
minor = 1; minor = 6;
if (sun4v_hvapi_register(group, major, &minor)) if (sun4v_hvapi_register(group, major, &minor))
goto bad; goto bad;
......
...@@ -77,8 +77,13 @@ void arch_cpu_idle(void) ...@@ -77,8 +77,13 @@ void arch_cpu_idle(void)
: "=&r" (pstate) : "=&r" (pstate)
: "i" (PSTATE_IE)); : "i" (PSTATE_IE));
if (!need_resched() && !cpu_is_offline(smp_processor_id())) if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
sun4v_cpu_yield(); sun4v_cpu_yield();
/* If resumed by cpu_poke then we need to explicitly
* call scheduler_ipi().
*/
scheduler_poke();
}
/* Re-enable interrupts. */ /* Re-enable interrupts. */
__asm__ __volatile__( __asm__ __volatile__(
......
...@@ -356,6 +356,7 @@ void __init start_early_boot(void) ...@@ -356,6 +356,7 @@ void __init start_early_boot(void)
check_if_starfire(); check_if_starfire();
per_cpu_patch(); per_cpu_patch();
sun4v_patch(); sun4v_patch();
smp_init_cpu_poke();
cpu = hard_smp_processor_id(); cpu = hard_smp_processor_id();
if (cpu >= NR_CPUS) { if (cpu >= NR_CPUS) {
......
...@@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map); ...@@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask; static cpumask_t smp_commenced_mask;
static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;
void smp_info(struct seq_file *m) void smp_info(struct seq_file *m)
{ {
int i; int i;
...@@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus) ...@@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus)
{ {
} }
/* Deliver a reschedule cross-call IPI to @cpu: the target traps into
 * xcall_receive_signal, which raises PIL_SMP_RECEIVE_SIGNAL there.
 */
static void send_cpu_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal,
			0, 0, cpumask_of(cpu));
}
/* Called from the idle loop after sun4v_cpu_yield() returns.  A CPU_POKE
 * resume does not deliver a real IPI, so if this CPU was woken by a poke
 * we must raise the reschedule softint ourselves so scheduler_ipi() runs
 * exactly as it would for an IPI-based wakeup.
 */
void scheduler_poke(void)
{
	/* Hypervisor CPU_POKE support was not detected at boot. */
	if (!cpu_poke)
		return;
	/* No poke was targeted at this CPU; woken for another reason. */
	if (!__this_cpu_read(poke))
		return;
	/* Consume the pending poke, then emulate the reschedule IPI. */
	__this_cpu_write(poke, false);
	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
}
/* Ask the hypervisor to resume an idle @cpu via CPU_POKE.
 *
 * The per-cpu "poke" flag is set BEFORE the hypercall so the target CPU
 * is guaranteed to observe it in scheduler_poke() when it wakes; the
 * flag is rolled back if the hypercall fails so a later wakeup does not
 * see a stale poke.
 *
 * Returns the hypervisor status code (HV_EOK on success); the caller
 * falls back to a real IPI on any other value.
 */
static unsigned long send_cpu_poke(int cpu)
{
	unsigned long hv_err;
	per_cpu(poke, cpu) = true;
	hv_err = sun4v_cpu_poke(cpu);
	if (hv_err != HV_EOK) {
		/* Poke not delivered: undo the flag and report (rate-limited,
		 * since this can run from the scheduler fast path).
		 */
		per_cpu(poke, cpu) = false;
		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
				   __func__, hv_err);
	}
	return hv_err;
}
void smp_send_reschedule(int cpu) void smp_send_reschedule(int cpu)
{ {
if (cpu == smp_processor_id()) { if (cpu == smp_processor_id()) {
WARN_ON_ONCE(preemptible()); WARN_ON_ONCE(preemptible());
set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
} else { return;
xcall_deliver((u64) &xcall_receive_signal, }
0, 0, cpumask_of(cpu));
/* Use cpu poke to resume idle cpu if supported. */
if (cpu_poke && idle_cpu(cpu)) {
unsigned long ret;
ret = send_cpu_poke(cpu);
if (ret == HV_EOK)
return;
} }
/* Use IPI in following cases:
* - cpu poke not supported
* - cpu not idle
* - send_cpu_poke() returns with error
*/
send_cpu_ipi(cpu);
}
/* Boot-time probe for hypervisor CPU_POKE support.
 *
 * CPU_POKE is part of the sun4v HV_GRP_CORE API from version 1.6
 * onward (the negotiation in sun4v_hvapi_init() requests minor 6), so
 * query the registered version and latch the result in cpu_poke.
 * No-op on systems not running under the sun4v hypervisor.
 *
 * NOTE(review): the original closing brace was duplicated ("} }") by
 * diff extraction; fixed here.
 */
void smp_init_cpu_poke(void)
{
	unsigned long major;
	unsigned long minor;
	int ret;

	if (tlb_type != hypervisor)
		return;

	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
	if (ret) {
		pr_debug("HV_GRP_CORE is not registered\n");
		return;
	}

	if (major == 1 && minor >= 6) {
		/* CPU POKE is registered. */
		cpu_poke = true;
		return;
	}

	pr_debug("CPU_POKE not supported\n");
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment