Commit 419123f9 authored by Martin Schwidefsky

s390/spinlock: do not yield to a CPU in udelay/mdelay

It does not make sense to try to relinquish the time slice with diag 0x9c
to a CPU that is in a state which does not allow it to be scheduled. The
scenario where this can happen is a CPU waiting in udelay/mdelay while
holding a spin-lock.

Add a CIF bit to tag a CPU in enabled wait and use it to detect that
yielding to such a CPU will not be successful, in which case the diagnose
call is skipped.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent c6f70d3b
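
For illustration, here is a minimal userspace C sketch of the decision logic this patch introduces. Only cpu_is_preempted() mirrors the helper in the diff below; the fake_lowcore structure, the test_cpu_flag_of()/smp_vcpu_scheduled() stubs, the array size and the main() scenario are hypothetical stand-ins for the kernel's lowcore access and SIGP sense-running check, used only to show when the diag 0x9c yield is skipped.

/*
 * Minimal userspace model of the yield decision added by this patch.
 * The lowcore, the SIGP "sense running" state and diag 0x9c are replaced
 * by plain variables; only cpu_is_preempted() mirrors the patched code.
 */
#include <stdio.h>

#define CIF_ENABLED_WAIT 5	/* CPU sits in enabled wait (e.g. udelay/mdelay) */

struct fake_lowcore {
	unsigned long cpu_flags;	/* stand-in for S390_lowcore.cpu_flags */
	int vcpu_scheduled;		/* stand-in for the sense running status */
};

static struct fake_lowcore lowcore[4];	/* one entry per modeled CPU */

static int test_cpu_flag_of(int flag, int cpu)
{
	return !!(lowcore[cpu].cpu_flags & (1UL << flag));
}

static int smp_vcpu_scheduled(int cpu)
{
	return lowcore[cpu].vcpu_scheduled;
}

/* Mirrors the cpu_is_preempted() helper added to the spinlock code below. */
static int cpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return 0;	/* enabled wait: yielding cannot help, skip diag 0x9c */
	if (smp_vcpu_scheduled(cpu))
		return 0;	/* owner is running, no need to yield */
	return 1;		/* owner was preempted, yielding makes sense */
}

int main(void)
{
	lowcore[1].vcpu_scheduled = 0;			/* preempted by the hypervisor */
	lowcore[2].cpu_flags = 1UL << CIF_ENABLED_WAIT;	/* waiting in udelay/mdelay */
	lowcore[3].vcpu_scheduled = 1;			/* running normally */

	for (int cpu = 1; cpu <= 3; cpu++)
		printf("cpu %d: %s\n", cpu,
		       cpu_is_preempted(cpu) ? "yield with diag 0x9c" : "keep spinning");
	return 0;
}

The point of the check is that a CPU sitting in enabled wait will not make progress even if it is given an extra time slice, so the comparatively expensive diagnose call is only worth issuing when the lock owner has actually been preempted.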
@@ -18,12 +18,14 @@
 #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
 #define CIF_FPU 3 /* restore FPU registers */
 #define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
 #define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
 #define _CIF_ASCE _BITUL(CIF_ASCE)
 #define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
 #define _CIF_FPU _BITUL(CIF_FPU)
 #define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
+#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
 #ifndef __ASSEMBLY__
@@ -52,6 +54,16 @@ static inline int test_cpu_flag(int flag)
 	return !!(S390_lowcore.cpu_flags & (1UL << flag));
 }
+/*
+ * Test CIF flag of another CPU. The caller needs to ensure that
+ * CPU hotplug can not happen, e.g. by disabling preemption.
+ */
+static inline int test_cpu_flag_of(int flag, int cpu)
+{
+	struct _lowcore *lc = lowcore_ptr[cpu];
+	return !!(lc->cpu_flags & (1UL << flag));
+}
 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
 /*
...
@@ -764,6 +764,7 @@ ENTRY(psw_idle)
 	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
 .Lpsw_idle_stcctm:
 #endif
+	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1146,6 +1147,7 @@ cleanup_critical:
 	.quad	.Lio_done - 4
 .Lcleanup_idle:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
 	# copy interrupt clock & cpu timer
 	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
 	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
...
@@ -37,6 +37,15 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
+static inline int cpu_is_preempted(int cpu)
+{
+	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+		return 0;
+	if (smp_vcpu_scheduled(cpu))
+		return 0;
+	return 1;
+}
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -53,7 +62,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		}
 		/* First iteration: check if the lock owner is running. */
-		if (first_diag && !smp_vcpu_scheduled(~owner)) {
+		if (first_diag && cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -72,7 +81,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
+		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -98,7 +107,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			local_irq_restore(flags);
 		}
 		/* Check if the lock owner is running. */
-		if (first_diag && !smp_vcpu_scheduled(~owner)) {
+		if (first_diag && cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -117,7 +126,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
+		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -155,7 +164,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -201,7 +210,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -231,7 +240,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && !smp_vcpu_scheduled(~owner))
+			if (owner && cpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -275,7 +284,7 @@ void arch_lock_relax(unsigned int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
 		return;
 	smp_yield_cpu(~cpu);
 }
...