Commit 5389e239 authored by David S. Miller

Merge branch 'sparc64-Use-low-latency-path-to-resume-idle-cpu'

Vijay Kumar says:

====================
sparc64: Use low latency path to resume idle cpu

CPU_POKE is a low-latency path for resuming a target cpu that has
suspended itself with CPU_YIELD. Use CPU_POKE to resume the cpu whenever
the hypervisor supports it.

	     Hackbench results (lower is better):

Number of
processes	w/o fix		with fix
1		0.012		0.010
10		0.021		0.019
100		0.151		0.148

Changelog:
v2:
  - Fixed comments and spacing (2/2)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 99274b81 8536e02e
@@ -298,6 +298,24 @@ unsigned long sun4v_cpu_stop(unsigned long cpuid);
 unsigned long sun4v_cpu_yield(void);
 #endif
 
+/* cpu_poke()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_POKE
+ * RET0:	status
+ * ERRORS:	ENOCPU		cpuid refers to a CPU that does not exist
+ *		EINVAL		cpuid is current CPU
+ *
+ * Poke CPU cpuid. If the target CPU is currently suspended having
+ * invoked the cpu-yield service, that vCPU will be resumed.
+ * Poke interrupts may only be sent to valid, non-local CPUs.
+ * It is not legal to poke the current vCPU.
+ */
+#define HV_FAST_CPU_POKE		0x13
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_cpu_poke(unsigned long cpuid);
+#endif
+
 /* cpu_qconf()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_CPU_QCONF
...
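A caller-side sketch of the new hypercall wrapper (hypothetical `target` variable; sun4v_cpu_poke(), HV_EOK, HV_ENOCPU and HV_EINVAL all come from this header):

	unsigned long err = sun4v_cpu_poke(target);	/* poke a remote vCPU */

	switch (err) {
	case HV_EOK:		/* poke delivered */
		break;
	case HV_ENOCPU:		/* cpuid refers to a CPU that does not exist */
	case HV_EINVAL:		/* cpuid is the current (local) CPU */
	default:
		pr_warn("cpu_poke failed: %lu\n", err);
		break;
	}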
@@ -33,6 +33,9 @@
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+void smp_init_cpu_poke(void);
+void scheduler_poke(void);
+
 void arch_send_call_function_single_ipi(int cpu);
 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
...
@@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu);
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
 #define smp_fill_in_cpu_possible_map() do { } while (0)
+#define smp_init_cpu_poke() do { } while (0)
+#define scheduler_poke() do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
...
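The UP stubs use the usual `do { } while (0)` idiom: the macro expands to a single statement that still demands its trailing semicolon, so call sites compile identically whether they resolve to the real function or to the stub. A minimal illustration:

	#define scheduler_poke() do { } while (0)

	if (need_resched())
		schedule();
	else
		scheduler_poke();	/* still one well-formed statement */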
@@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void)
 	group = HV_GRP_CORE;
 	major = 1;
-	minor = 1;
+	minor = 6;
 	if (sun4v_hvapi_register(group, major, &minor))
 		goto bad;
...
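For context, sun4v_hvapi_register() takes `minor` by reference and negotiates downward: the kernel asks for the highest minor it can use, and on success the hypervisor may return a lower value. A sketch of the semantics behind the one-line bump above (the pr_info() is illustrative):

	unsigned long group = HV_GRP_CORE;
	unsigned long major = 1;
	unsigned long minor = 6;	/* highest minor we know how to use */

	if (sun4v_hvapi_register(group, major, &minor))
		goto bad;		/* group/major combination refused */

	/* On success `minor` holds the negotiated value; anything below 6
	 * means CPU_POKE is unavailable even though we asked for it. */
	pr_info("HV_GRP_CORE API: %lu.%lu\n", major, minor);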
@@ -106,6 +106,17 @@ ENTRY(sun4v_cpu_yield)
 	 nop
 ENDPROC(sun4v_cpu_yield)
 
+/* %o0:	cpuid
+ *
+ * returns %o0:	status
+ */
+ENTRY(sun4v_cpu_poke)
+	mov	HV_FAST_CPU_POKE, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_cpu_poke)
+
 /* %o0:	type
  * %o1:	queue paddr
  * %o2:	num queue entries
...
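The stub follows the sun4v fast-trap convention: the function number goes in %o5, arguments in %o0 onward, `ta HV_FAST_TRAP` enters the hypervisor, and the status comes back in %o0. A hypothetical inline-asm rendering of the same call, for illustration only (the kernel keeps these stubs in assembly):

	static inline unsigned long cpu_poke_hcall(unsigned long cpuid)
	{
		register unsigned long func asm("o5") = HV_FAST_CPU_POKE;
		register unsigned long arg0 asm("o0") = cpuid;

		/* Trap in; %o0 is reused for the returned status. */
		__asm__ __volatile__("ta %2"
				     : "+r" (arg0)
				     : "r" (func), "i" (HV_FAST_TRAP)
				     : "memory");
		return arg0;
	}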
@@ -77,8 +77,13 @@ void arch_cpu_idle(void)
 		: "=&r" (pstate)
 		: "i" (PSTATE_IE));
 
-		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
+		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
 			sun4v_cpu_yield();
+			/* If resumed by cpu_poke then we need to explicitly
+			 * call scheduler_ipi().
+			 */
+			scheduler_poke();
+		}
 
 		/* Re-enable interrupts. */
 		__asm__ __volatile__(
...
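This hunk is the heart of the change: a CPU_POKE resume returns from sun4v_cpu_yield() without any interrupt being delivered, so nothing has raised the reschedule softint yet; scheduler_poke() converts the sender's per-cpu flag into that softint. A toy user-space model of the handshake, with a C11 atomic standing in for the per-cpu `poke` flag (purely illustrative):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool poke_flag;	/* models per_cpu(poke, cpu) */

	static void waker(void)
	{
		atomic_store(&poke_flag, true);	/* set the flag first... */
		/* ...then the hypervisor poke resumes the sleeper. */
	}

	static void idle_cpu_resumes(void)
	{
		/* sun4v_cpu_yield() has just returned. */
		if (atomic_exchange(&poke_flag, false))
			puts("raise reschedule softint");	/* scheduler_ipi() path */
	}

	int main(void)
	{
		waker();
		idle_cpu_resumes();
		return 0;
	}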
@@ -356,6 +356,7 @@ void __init start_early_boot(void)
 	check_if_starfire();
 	per_cpu_patch();
 	sun4v_patch();
+	smp_init_cpu_poke();
 
 	cpu = hard_smp_processor_id();
 	if (cpu >= NR_CPUS) {
...
@@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map);
 
 static cpumask_t smp_commenced_mask;
 
+static DEFINE_PER_CPU(bool, poke);
+static bool cpu_poke;
+
 void smp_info(struct seq_file *m)
 {
 	int i;
@@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+static void send_cpu_ipi(int cpu)
+{
+	xcall_deliver((u64) &xcall_receive_signal,
+			0, 0, cpumask_of(cpu));
+}
+
+void scheduler_poke(void)
+{
+	if (!cpu_poke)
+		return;
+
+	if (!__this_cpu_read(poke))
+		return;
+
+	__this_cpu_write(poke, false);
+	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+}
+
+static unsigned long send_cpu_poke(int cpu)
+{
+	unsigned long hv_err;
+
+	per_cpu(poke, cpu) = true;
+	hv_err = sun4v_cpu_poke(cpu);
+	if (hv_err != HV_EOK) {
+		per_cpu(poke, cpu) = false;
+		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
+				   __func__, hv_err);
+	}
+
+	return hv_err;
+}
+
 void smp_send_reschedule(int cpu)
 {
 	if (cpu == smp_processor_id()) {
 		WARN_ON_ONCE(preemptible());
 		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
-	} else {
-		xcall_deliver((u64) &xcall_receive_signal,
-			      0, 0, cpumask_of(cpu));
+		return;
 	}
+
+	/* Use cpu poke to resume idle cpu if supported. */
+	if (cpu_poke && idle_cpu(cpu)) {
+		unsigned long ret;
+
+		ret = send_cpu_poke(cpu);
+		if (ret == HV_EOK)
+			return;
+	}
+
+	/* Use IPI in following cases:
+	 * - cpu poke not supported
+	 * - cpu not idle
+	 * - send_cpu_poke() returns with error
+	 */
+	send_cpu_ipi(cpu);
+}
+
+void smp_init_cpu_poke(void)
+{
+	unsigned long major;
+	unsigned long minor;
+	int ret;
+
+	if (tlb_type != hypervisor)
+		return;
+
+	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
+	if (ret) {
+		pr_debug("HV_GRP_CORE is not registered\n");
+		return;
+	}
+
+	if (major == 1 && minor >= 6) {
+		/* CPU POKE is registered. */
+		cpu_poke = true;
+		return;
+	}
+
+	pr_debug("CPU_POKE not supported\n");
+}
+
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
...
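Two details above are worth calling out. send_cpu_poke() sets the per-cpu flag before the hypercall and rolls it back on failure, so the target can never be resumed without also seeing the flag, and a failed poke (which falls back to an IPI) leaves no stale flag behind. And smp_send_reschedule() now reduces to a three-way choice; condensed for illustration:

	/* Condensed decision tree, mirroring the function above. */
	if (cpu == smp_processor_id())
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);	/* local: raise softint */
	else if (cpu_poke && idle_cpu(cpu) && send_cpu_poke(cpu) == HV_EOK)
		;						/* low-latency poke worked */
	else
		send_cpu_ipi(cpu);				/* fallback: cross-call IPI */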