Commit daadb3bd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'locking_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Borislav Petkov:
 "Lots of cleanups and preparation. Highlights:

   - futex: Cleanup and remove runtime futex_cmpxchg detection

   - rtmutex: Some fixes for the PREEMPT_RT locking infrastructure

   - kcsan: Share owner_on_cpu() between mutex,rtmutex and rwsem and
     annotate the racy owner->on_cpu access *once*.

   - atomic64: Dead-Code-Elimination"

[ Description above by Peter Zijlstra ]

* tag 'locking_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomic: atomic64: Remove unusable atomic ops
  futex: Fix additional regressions
  locking: Allow to include asm/spinlock_types.h from linux/spinlock_types_raw.h
  x86/mm: Include spinlock_t definition in pgtable.
  locking: Mark racy reads of owner->on_cpu
  locking: Make owner_on_cpu() into <linux/sched.h>
  lockdep/selftests: Adapt ww-tests for PREEMPT_RT
  lockdep/selftests: Skip the softirq related tests on PREEMPT_RT
  lockdep/selftests: Unbalanced migrate_disable() & rcu_read_lock().
  lockdep/selftests: Avoid using local_lock_{acquire|release}().
  lockdep: Remove softirq accounting on PREEMPT_RT.
  locking/rtmutex: Add rt_mutex_lock_nest_lock() and rt_mutex_lock_killable().
  locking/rtmutex: Squash self-deadlock check for ww_rt_mutex.
  locking: Remove rt_rwlock_is_contended().
  sched: Trigger warning if ->migration_disabled counter underflows.
  futex: Fix sparc32/m68k/nds32 build regression
  futex: Remove futex_cmpxchg detection
  futex: Ensure futex_atomic_cmpxchg_inatomic() is present
  kernel/locking: Use a pointer in ww_mutex_trylock().
parents 6ae71436 f16cc980
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef _ALPHA_SPINLOCK_TYPES_H #ifndef _ALPHA_SPINLOCK_TYPES_H
#define _ALPHA_SPINLOCK_TYPES_H #define _ALPHA_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -32,7 +32,6 @@ config ARC ...@@ -32,7 +32,6 @@ config ARC
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_IOREMAP_PROT select HAVE_IOREMAP_PROT
select HAVE_KERNEL_GZIP select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZMA
......
...@@ -93,7 +93,6 @@ config ARM ...@@ -93,7 +93,6 @@ config ARM
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG) select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG)
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IRQ_TIME_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef __ASM_SPINLOCK_TYPES_H #ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -196,7 +196,6 @@ config ARM64 ...@@ -196,7 +196,6 @@ config ARM64
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUTEX_CMPXCHG if FUTEX
select MMU_GATHER_RCU_TABLE_FREE select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ select HAVE_RSEQ
select HAVE_STACKPROTECTOR select HAVE_STACKPROTECTOR
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#ifndef __ASM_SPINLOCK_TYPES_H #ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H
#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) #if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -52,7 +52,6 @@ config CSKY ...@@ -52,7 +52,6 @@ config CSKY
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUTEX_CMPXCHG if FUTEX && SMP
select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO select HAVE_KERNEL_LZO
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#ifndef __ASM_CSKY_SPINLOCK_TYPES_H #ifndef __ASM_CSKY_SPINLOCK_TYPES_H
#define __ASM_CSKY_SPINLOCK_TYPES_H #define __ASM_CSKY_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#ifndef _ASM_SPINLOCK_TYPES_H #ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H #define _ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef _ASM_IA64_SPINLOCK_TYPES_H #ifndef _ASM_IA64_SPINLOCK_TYPES_H
#define _ASM_IA64_SPINLOCK_TYPES_H #define _ASM_IA64_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -21,7 +21,6 @@ config M68K ...@@ -21,7 +21,6 @@ config M68K
select HAVE_ASM_MODVERSIONS select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_BUGVERBOSE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC select HAVE_MOD_ARCH_SPECIFIC
select HAVE_UID16 select HAVE_UID16
select MMU_GATHER_NO_RANGE if MMU select MMU_GATHER_NO_RANGE if MMU
......
...@@ -19,7 +19,11 @@ ...@@ -19,7 +19,11 @@
#include <asm/sync.h> #include <asm/sync.h>
#include <asm/war.h> #include <asm/war.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ #define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
#include <asm-generic/futex.h>
#define __futex_atomic_op(op, insn, ret, oldval, uaddr, oparg) \
{ \ { \
if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { \ if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { \
__asm__ __volatile__( \ __asm__ __volatile__( \
...@@ -80,8 +84,10 @@ ...@@ -80,8 +84,10 @@
: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
"i" (-EFAULT) \ "i" (-EFAULT) \
: "memory"); \ : "memory"); \
} else \ } else { \
ret = -ENOSYS; \ /* fallback for non-SMP */ \
ret = futex_atomic_op_inuser_local(op, oparg, oval, uaddr); \
} \
} }
static inline int static inline int
...@@ -94,23 +100,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) ...@@ -94,23 +100,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
__futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg); __futex_atomic_op(op, "move $1, %z5", ret, oldval, uaddr, oparg);
break; break;
case FUTEX_OP_ADD: case FUTEX_OP_ADD:
__futex_atomic_op("addu $1, %1, %z5", __futex_atomic_op(op, "addu $1, %1, %z5",
ret, oldval, uaddr, oparg); ret, oldval, uaddr, oparg);
break; break;
case FUTEX_OP_OR: case FUTEX_OP_OR:
__futex_atomic_op("or $1, %1, %z5", __futex_atomic_op(op, "or $1, %1, %z5",
ret, oldval, uaddr, oparg); ret, oldval, uaddr, oparg);
break; break;
case FUTEX_OP_ANDN: case FUTEX_OP_ANDN:
__futex_atomic_op("and $1, %1, %z5", __futex_atomic_op(op, "and $1, %1, %z5",
ret, oldval, uaddr, ~oparg); ret, oldval, uaddr, ~oparg);
break; break;
case FUTEX_OP_XOR: case FUTEX_OP_XOR:
__futex_atomic_op("xor $1, %1, %z5", __futex_atomic_op(op, "xor $1, %1, %z5",
ret, oldval, uaddr, oparg); ret, oldval, uaddr, oparg);
break; break;
default: default:
...@@ -193,8 +199,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -193,8 +199,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT) "i" (-EFAULT)
: "memory"); : "memory");
} else } else {
return -ENOSYS; return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
}
*uval = val; *uval = val;
return ret; return ret;
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
#define _ASM_POWERPC_SPINLOCK_TYPES_H #define _ASM_POWERPC_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -83,7 +83,6 @@ config RISCV ...@@ -83,7 +83,6 @@ config RISCV
select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_EBPF_JIT if MMU select HAVE_EBPF_JIT if MMU
select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#ifndef _ASM_RISCV_SPINLOCK_TYPES_H #ifndef _ASM_RISCV_SPINLOCK_TYPES_H
#define _ASM_RISCV_SPINLOCK_TYPES_H #define _ASM_RISCV_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -165,7 +165,6 @@ config S390 ...@@ -165,7 +165,6 @@ config S390
select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO select HAVE_GENERIC_VDSO
select HAVE_IOREMAP_PROT if PCI select HAVE_IOREMAP_PROT if PCI
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef __ASM_SPINLOCK_TYPES_H #ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -34,7 +34,6 @@ config SUPERH ...@@ -34,7 +34,6 @@ config SUPERH
select HAVE_FAST_GUP if MMU select HAVE_FAST_GUP if MMU
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_HW_BREAKPOINT select HAVE_HW_BREAKPOINT
select HAVE_IOREMAP_PROT if MMU && !X2TLB select HAVE_IOREMAP_PROT if MMU && !X2TLB
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef __ASM_SH_SPINLOCK_TYPES_H #ifndef __ASM_SH_SPINLOCK_TYPES_H
#define __ASM_SH_SPINLOCK_TYPES_H #define __ASM_SH_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -14,7 +14,6 @@ config UML ...@@ -14,7 +14,6 @@ config UML
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ASM_MODVERSIONS select HAVE_ASM_MODVERSIONS
select HAVE_UID16 select HAVE_UID16
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_BUGVERBOSE
select NO_DMA if !UML_DMA_EMULATION select NO_DMA if !UML_DMA_EMULATION
......
...@@ -323,7 +323,6 @@ EXPORT_SYMBOL(arch_futex_atomic_op_inuser); ...@@ -323,7 +323,6 @@ EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
* 0 - On success * 0 - On success
* -EFAULT - User access resulted in a page fault * -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention * -EAGAIN - Atomic operation was unable to complete due to contention
* -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/ */
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
......
...@@ -31,7 +31,6 @@ config XTENSA ...@@ -31,7 +31,6 @@ config XTENSA
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX
select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_PCI select HAVE_PCI
......
...@@ -16,6 +16,10 @@ ...@@ -16,6 +16,10 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/errno.h> #include <linux/errno.h>
#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
#include <asm-generic/futex.h>
#if XCHAL_HAVE_EXCLUSIVE #if XCHAL_HAVE_EXCLUSIVE
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \ #define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \ __asm__ __volatile( \
...@@ -105,7 +109,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, ...@@ -105,7 +109,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
return ret; return ret;
#else #else
return -ENOSYS; return futex_atomic_op_inuser_local(op, oparg, oval, uaddr);
#endif #endif
} }
...@@ -156,7 +160,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -156,7 +160,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return ret; return ret;
#else #else
return -ENOSYS; return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
#endif #endif
} }
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#ifndef __ASM_SPINLOCK_TYPES_H #ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H
#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) #if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -6,15 +6,22 @@ ...@@ -6,15 +6,22 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/errno.h> #include <asm/errno.h>
#ifndef futex_atomic_cmpxchg_inatomic
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/* /*
* The following implementation only for uniprocessor machines. * The following implementation only for uniprocessor machines.
* It relies on preempt_disable() ensuring mutual exclusion. * It relies on preempt_disable() ensuring mutual exclusion.
* *
*/ */
#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
#endif /* CONFIG_SMP */
#endif
/** /**
* arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
* argument and comparison of the previous * argument and comparison of the previous
* futex value with another constant. * futex value with another constant.
* *
...@@ -28,7 +35,7 @@ ...@@ -28,7 +35,7 @@
* -ENOSYS - Operation not supported * -ENOSYS - Operation not supported
*/ */
static inline int static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
{ {
int oldval, ret; int oldval, ret;
u32 tmp; u32 tmp;
...@@ -75,7 +82,7 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) ...@@ -75,7 +82,7 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
} }
/** /**
* futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
* uaddr with newval if the current value is * uaddr with newval if the current value is
* oldval. * oldval.
* @uval: pointer to store content of @uaddr * @uval: pointer to store content of @uaddr
...@@ -87,10 +94,9 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) ...@@ -87,10 +94,9 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
* 0 - On success * 0 - On success
* -EFAULT - User access resulted in a page fault * -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention * -EAGAIN - Atomic operation was unable to complete due to contention
* -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/ */
static inline int static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval) u32 oldval, u32 newval)
{ {
u32 val; u32 val;
...@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return 0; return 0;
} }
#else
static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
return -ENOSYS;
}
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
return -ENOSYS;
}
#endif /* CONFIG_SMP */
#endif #endif
...@@ -71,14 +71,6 @@ do { \ ...@@ -71,14 +71,6 @@ do { \
do { \ do { \
__this_cpu_dec(hardirq_context); \ __this_cpu_dec(hardirq_context); \
} while (0) } while (0)
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
} while (0)
# define lockdep_softirq_exit() \
do { \
current->softirq_context--; \
} while (0)
# define lockdep_hrtimer_enter(__hrtimer) \ # define lockdep_hrtimer_enter(__hrtimer) \
({ \ ({ \
...@@ -140,6 +132,21 @@ do { \ ...@@ -140,6 +132,21 @@ do { \
# define lockdep_irq_work_exit(__work) do { } while (0) # define lockdep_irq_work_exit(__work) do { } while (0)
#endif #endif
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
} while (0)
# define lockdep_softirq_exit() \
do { \
current->softirq_context--; \
} while (0)
#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \ #if defined(CONFIG_IRQSOFF_TRACER) || \
defined(CONFIG_PREEMPT_TRACER) defined(CONFIG_PREEMPT_TRACER)
extern void stop_critical_timings(void); extern void stop_critical_timings(void);
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include <linux/bits.h> #include <linux/bits.h>
#include <linux/param.h> #include <linux/param.h>
#include <linux/spinlock_types.h> #include <linux/spinlock_types_raw.h>
#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define DEFAULT_RATELIMIT_BURST 10 #define DEFAULT_RATELIMIT_BURST 10
......
...@@ -99,13 +99,22 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock ...@@ -99,13 +99,22 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
#define rt_mutex_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else #else
extern void rt_mutex_lock(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif #endif
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock); extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock);
......
...@@ -2178,6 +2178,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask); ...@@ -2178,6 +2178,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static inline bool owner_on_cpu(struct task_struct *owner)
{
/*
* As lock holder preemption issue, we both skip spinning if
* task is not on cpu or its cpu is preempted
*/
return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}
/* Returns effective CPU energy utilization, as seen by the scheduler */ /* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu, unsigned long max); unsigned long sched_cpu_util(int cpu, unsigned long max);
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
#ifndef __LINUX_SPINLOCK_TYPES_UP_H #ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H #define __LINUX_SPINLOCK_TYPES_UP_H
#ifndef __LINUX_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly" # error "please don't include this file directly"
#endif #endif
......
...@@ -1579,6 +1579,7 @@ config BASE_FULL ...@@ -1579,6 +1579,7 @@ config BASE_FULL
config FUTEX config FUTEX
bool "Enable futex support" if EXPERT bool "Enable futex support" if EXPERT
depends on !(SPARC32 && SMP)
default y default y
imply RT_MUTEXES imply RT_MUTEXES
help help
...@@ -1591,14 +1592,6 @@ config FUTEX_PI ...@@ -1591,14 +1592,6 @@ config FUTEX_PI
depends on FUTEX && RT_MUTEXES depends on FUTEX && RT_MUTEXES
default y default y
config HAVE_FUTEX_CMPXCHG
bool
depends on FUTEX
help
Architectures should select this if futex_atomic_cmpxchg_inatomic()
is implemented and always working. This removes a couple of runtime
checks.
config EPOLL config EPOLL
bool "Enable eventpoll support" if EXPERT bool "Enable eventpoll support" if EXPERT
default y default y
......
...@@ -41,11 +41,6 @@ ...@@ -41,11 +41,6 @@
#include "futex.h" #include "futex.h"
#include "../locking/rtmutex_common.h" #include "../locking/rtmutex_common.h"
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif
/* /*
* The base of the bucket array and its size are always used together * The base of the bucket array and its size are always used together
* (after initialization only in futex_hash()), so ensure that they * (after initialization only in futex_hash()), so ensure that they
...@@ -776,9 +771,6 @@ static void exit_robust_list(struct task_struct *curr) ...@@ -776,9 +771,6 @@ static void exit_robust_list(struct task_struct *curr)
unsigned long futex_offset; unsigned long futex_offset;
int rc; int rc;
if (!futex_cmpxchg_enabled)
return;
/* /*
* Fetch the list head (which was registered earlier, via * Fetch the list head (which was registered earlier, via
* sys_set_robust_list()): * sys_set_robust_list()):
...@@ -874,9 +866,6 @@ static void compat_exit_robust_list(struct task_struct *curr) ...@@ -874,9 +866,6 @@ static void compat_exit_robust_list(struct task_struct *curr)
compat_long_t futex_offset; compat_long_t futex_offset;
int rc; int rc;
if (!futex_cmpxchg_enabled)
return;
/* /*
* Fetch the list head (which was registered earlier, via * Fetch the list head (which was registered earlier, via
* sys_set_robust_list()): * sys_set_robust_list()):
...@@ -950,8 +939,6 @@ static void exit_pi_state_list(struct task_struct *curr) ...@@ -950,8 +939,6 @@ static void exit_pi_state_list(struct task_struct *curr)
struct futex_hash_bucket *hb; struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT; union futex_key key = FUTEX_KEY_INIT;
if (!futex_cmpxchg_enabled)
return;
/* /*
* We are a ZOMBIE and nobody can enqueue itself on * We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful * pi_state_list anymore, but we have to be careful
...@@ -1125,26 +1112,6 @@ void futex_exit_release(struct task_struct *tsk) ...@@ -1125,26 +1112,6 @@ void futex_exit_release(struct task_struct *tsk)
futex_cleanup_end(tsk, FUTEX_STATE_DEAD); futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
} }
static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
/*
* This will fail and we want it. Some arch implementations do
* runtime detection of the futex_atomic_cmpxchg_inatomic()
* functionality. We want to know that before we call in any
* of the complex code paths. Also we want to prevent
* registration of robust lists in that case. NULL is
* guaranteed to fault and we get -EFAULT on functional
* implementation, the non-functional ones will return
* -ENOSYS.
*/
if (futex_cmpxchg_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
#endif
}
static int __init futex_init(void) static int __init futex_init(void)
{ {
unsigned int futex_shift; unsigned int futex_shift;
...@@ -1163,8 +1130,6 @@ static int __init futex_init(void) ...@@ -1163,8 +1130,6 @@ static int __init futex_init(void)
futex_hashsize, futex_hashsize); futex_hashsize, futex_hashsize);
futex_hashsize = 1UL << futex_shift; futex_hashsize = 1UL << futex_shift;
futex_detect_cmpxchg();
for (i = 0; i < futex_hashsize; i++) { for (i = 0; i < futex_hashsize; i++) {
atomic_set(&futex_queues[i].waiters, 0); atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain); plist_head_init(&futex_queues[i].chain);
......
...@@ -27,12 +27,6 @@ ...@@ -27,12 +27,6 @@
#define FLAGS_CLOCKRT 0x02 #define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04 #define FLAGS_HAS_TIMEOUT 0x04
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
extern int __read_mostly futex_cmpxchg_enabled;
#endif
#ifdef CONFIG_FAIL_FUTEX #ifdef CONFIG_FAIL_FUTEX
extern bool should_fail_futex(bool fshared); extern bool should_fail_futex(bool fshared);
#else #else
......
...@@ -29,8 +29,6 @@ ...@@ -29,8 +29,6 @@
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
size_t, len) size_t, len)
{ {
if (!futex_cmpxchg_enabled)
return -ENOSYS;
/* /*
* The kernel knows only one size for now: * The kernel knows only one size for now:
*/ */
...@@ -56,9 +54,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, ...@@ -56,9 +54,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
unsigned long ret; unsigned long ret;
struct task_struct *p; struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
rcu_read_lock(); rcu_read_lock();
ret = -ESRCH; ret = -ESRCH;
...@@ -103,17 +98,6 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, ...@@ -103,17 +98,6 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
return -ENOSYS; return -ENOSYS;
} }
switch (cmd) {
case FUTEX_LOCK_PI:
case FUTEX_LOCK_PI2:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
case FUTEX_CMP_REQUEUE_PI:
if (!futex_cmpxchg_enabled)
return -ENOSYS;
}
switch (cmd) { switch (cmd) {
case FUTEX_WAIT: case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY; val3 = FUTEX_BITSET_MATCH_ANY;
...@@ -323,9 +307,6 @@ COMPAT_SYSCALL_DEFINE2(set_robust_list, ...@@ -323,9 +307,6 @@ COMPAT_SYSCALL_DEFINE2(set_robust_list,
struct compat_robust_list_head __user *, head, struct compat_robust_list_head __user *, head,
compat_size_t, len) compat_size_t, len)
{ {
if (!futex_cmpxchg_enabled)
return -ENOSYS;
if (unlikely(len != sizeof(*head))) if (unlikely(len != sizeof(*head)))
return -EINVAL; return -EINVAL;
...@@ -342,9 +323,6 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, ...@@ -342,9 +323,6 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
unsigned long ret; unsigned long ret;
struct task_struct *p; struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
rcu_read_lock(); rcu_read_lock();
ret = -ESRCH; ret = -ESRCH;
......
...@@ -5485,6 +5485,7 @@ static noinstr void check_flags(unsigned long flags) ...@@ -5485,6 +5485,7 @@ static noinstr void check_flags(unsigned long flags)
} }
} }
#ifndef CONFIG_PREEMPT_RT
/* /*
* We dont accurately track softirq state in e.g. * We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only * hardirq contexts (such as on 4KSTACKS), so only
...@@ -5499,6 +5500,7 @@ static noinstr void check_flags(unsigned long flags) ...@@ -5499,6 +5500,7 @@ static noinstr void check_flags(unsigned long flags)
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
} }
} }
#endif
if (!debug_locks) if (!debug_locks)
print_irqtrace_events(current); print_irqtrace_events(current);
......
...@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, ...@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
/* /*
* Use vcpu_is_preempted to detect lock holder preemption issue. * Use vcpu_is_preempted to detect lock holder preemption issue.
*/ */
if (!owner->on_cpu || need_resched() || if (!owner_on_cpu(owner) || need_resched()) {
vcpu_is_preempted(task_cpu(owner))) {
ret = false; ret = false;
break; break;
} }
...@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock) ...@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
* structure won't go away during the spinning period. * structure won't go away during the spinning period.
*/ */
owner = __mutex_owner(lock); owner = __mutex_owner(lock);
/*
* As lock holder preemption issue, we both skip spinning if task is not
* on cpu or its cpu is preempted
*/
if (owner) if (owner)
retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); retval = owner_on_cpu(owner);
/* /*
* If lock->owner is not set, the mutex has been released. Return true * If lock->owner is not set, the mutex has been released. Return true
......
...@@ -1103,8 +1103,11 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, ...@@ -1103,8 +1103,11 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
* the other will detect the deadlock and return -EDEADLOCK, * the other will detect the deadlock and return -EDEADLOCK,
* which is wrong, as the other waiter is not in a deadlock * which is wrong, as the other waiter is not in a deadlock
* situation. * situation.
*
* Except for ww_mutex, in that case the chain walk must already deal
* with spurious cycles, see the comments at [3] and [6].
*/ */
if (owner == task) if (owner == task && !(build_ww_mutex() && ww_ctx))
return -EDEADLK; return -EDEADLK;
raw_spin_lock(&task->pi_lock); raw_spin_lock(&task->pi_lock);
...@@ -1379,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, ...@@ -1379,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
* for CONFIG_PREEMPT_RCU=y) * for CONFIG_PREEMPT_RCU=y)
* - the VCPU on which owner runs is preempted * - the VCPU on which owner runs is preempted
*/ */
if (!owner->on_cpu || need_resched() || if (!owner_on_cpu(owner) || need_resched() ||
!rt_mutex_waiter_is_top_waiter(lock, waiter) || !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
vcpu_is_preempted(task_cpu(owner))) {
res = false; res = false;
break; break;
} }
......
...@@ -21,12 +21,13 @@ int max_lock_depth = 1024; ...@@ -21,12 +21,13 @@ int max_lock_depth = 1024;
*/ */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock, static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
unsigned int state, unsigned int state,
struct lockdep_map *nest_lock,
unsigned int subclass) unsigned int subclass)
{ {
int ret; int ret;
might_sleep(); might_sleep();
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
ret = __rt_mutex_lock(&lock->rtmutex, state); ret = __rt_mutex_lock(&lock->rtmutex, state);
if (ret) if (ret)
mutex_release(&lock->dep_map, _RET_IP_); mutex_release(&lock->dep_map, _RET_IP_);
...@@ -48,10 +49,16 @@ EXPORT_SYMBOL(rt_mutex_base_init); ...@@ -48,10 +49,16 @@ EXPORT_SYMBOL(rt_mutex_base_init);
*/ */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{ {
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass); __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
} }
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
#else /* !CONFIG_DEBUG_LOCK_ALLOC */ #else /* !CONFIG_DEBUG_LOCK_ALLOC */
/** /**
...@@ -61,7 +68,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); ...@@ -61,7 +68,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
*/ */
void __sched rt_mutex_lock(struct rt_mutex *lock) void __sched rt_mutex_lock(struct rt_mutex *lock)
{ {
__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0); __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
} }
EXPORT_SYMBOL_GPL(rt_mutex_lock); EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif #endif
...@@ -77,10 +84,25 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); ...@@ -77,10 +84,25 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/ */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{ {
return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0); return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
} }
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
* rt_mutex_lock_killable - lock a rt_mutex killable
*
* @lock: the rt_mutex to be locked
*
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
*/
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
/** /**
* rt_mutex_trylock - try to lock a rt_mutex * rt_mutex_trylock - try to lock a rt_mutex
* *
......
...@@ -658,15 +658,6 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) ...@@ -658,15 +658,6 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
return false; return false;
} }
static inline bool owner_on_cpu(struct task_struct *owner)
{
/*
* As lock holder preemption issue, we both skip spinning if
* task is not on cpu or its cpu is preempted
*/
return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{ {
struct task_struct *owner; struct task_struct *owner;
......
...@@ -257,12 +257,6 @@ void __sched rt_write_unlock(rwlock_t *rwlock) ...@@ -257,12 +257,6 @@ void __sched rt_write_unlock(rwlock_t *rwlock)
} }
EXPORT_SYMBOL(rt_write_unlock); EXPORT_SYMBOL(rt_write_unlock);
int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
{
return rw_base_is_contended(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_rwlock_is_contended);
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name, void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
struct lock_class_key *key) struct lock_class_key *key)
......
...@@ -26,7 +26,7 @@ int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) ...@@ -26,7 +26,7 @@ int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
if (__rt_mutex_trylock(&rtm->rtmutex)) { if (__rt_mutex_trylock(&rtm->rtmutex)) {
ww_mutex_set_context_fastpath(lock, ww_ctx); ww_mutex_set_context_fastpath(lock, ww_ctx);
mutex_acquire_nest(&rtm->dep_map, 0, 1, ww_ctx->dep_map, _RET_IP_); mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
return 1; return 1;
} }
......
...@@ -2184,6 +2184,9 @@ void migrate_enable(void) ...@@ -2184,6 +2184,9 @@ void migrate_enable(void)
return; return;
} }
if (WARN_ON_ONCE(!p->migration_disabled))
return;
/* /*
* Ensure stop_task runs either before or after this, and that * Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
......
...@@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=) ...@@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \ #define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \ ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op) ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &=) ATOMIC64_OPS(and, &=)
...@@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=) ...@@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
s64 generic_atomic64_dec_if_positive(atomic64_t *v) s64 generic_atomic64_dec_if_positive(atomic64_t *v)
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment