Commit 03b8c7b6 authored by Heiko Carstens, committed by Thomas Gleixner

futex: Allow architectures to skip futex_atomic_cmpxchg_inatomic() test

If an architecture implements futex_atomic_cmpxchg_inatomic() and no runtime
check is necessary, allow the test within futex_init() to be skipped.

This makes it possible to get rid of code which would always give the same
result, and also lets the compiler optimize a couple of if statements away.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Finn Thain <fthain@telegraphics.com.au>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Link: http://lkml.kernel.org/r/20140302120947.GA3641@osiris
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d2ae2e52
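
Why the optimization works: with CONFIG_HAVE_FUTEX_CMPXCHG selected, futex_cmpxchg_enabled becomes the literal constant 1, so every branch testing it is provably dead and the compiler can delete it. A minimal user-space sketch of the pattern, not kernel code; HAVE_CMPXCHG and robust_op() are hypothetical stand-ins:

#include <stdio.h>

#ifdef HAVE_CMPXCHG              /* stand-in for CONFIG_HAVE_FUTEX_CMPXCHG */
#define cmpxchg_enabled 1        /* compile-time constant: tests fold away */
#else
static int cmpxchg_enabled;      /* runtime flag, set by detection code */
#endif

static int robust_op(void)
{
        if (!cmpxchg_enabled)    /* provably false in the macro case */
                return -1;       /* the kernel returns -ENOSYS here */
        return 0;
}

int main(void)
{
        printf("robust_op() = %d\n", robust_op());
        return 0;
}

Built with -DHAVE_CMPXCHG, the !cmpxchg_enabled test constant-folds to false and the early return disappears from the object code; without it, the check remains a genuine runtime load and branch.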
arch/s390/Kconfig
@@ -117,6 +117,7 @@ config S390
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4
include/linux/futex.h
@@ -55,7 +55,11 @@ union futex_key {
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
 extern int futex_cmpxchg_enabled;
+#endif
 #else
 static inline void exit_robust_list(struct task_struct *curr)
 {
init/Kconfig
@@ -1387,6 +1387,13 @@ config FUTEX
 	  support for "fast userspace mutexes".  The resulting kernel may not
 	  run glibc-based applications correctly.
 
+config HAVE_FUTEX_CMPXCHG
+	bool
+	help
+	  Architectures should select this if futex_atomic_cmpxchg_inatomic()
+	  is implemented and always working. This removes a couple of runtime
+	  checks.
+
 config EPOLL
 	bool "Enable eventpoll support" if EXPERT
 	default y
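
For reference, the operation whose availability HAVE_FUTEX_CMPXCHG advertises: futex_atomic_cmpxchg_inatomic() atomically compares a 32-bit user-space word against an expected value, swaps in a new one on a match, and reports the value it found. A hedged user-space sketch of those semantics using the GCC/Clang __sync builtin; the function name is hypothetical, and the real primitive operates on user memory, so it can also return -EFAULT (fault) or -ENOSYS (not implemented):

#include <stdint.h>
#include <stdio.h>

static int cmpxchg_inatomic_sketch(uint32_t *curval, uint32_t *uaddr,
                                   uint32_t oldval, uint32_t newval)
{
        /* Atomic compare-and-swap; reports the value found at *uaddr. */
        *curval = __sync_val_compare_and_swap(uaddr, oldval, newval);
        return 0;
}

int main(void)
{
        uint32_t word = 0, cur;

        cmpxchg_inatomic_sketch(&cur, &word, 0, 42);
        printf("found %u, word is now %u\n", cur, word); /* found 0, word is now 42 */
        return 0;
}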
kernel/futex.c
@@ -157,7 +157,9 @@
  * enqueue.
  */
 
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 int __read_mostly futex_cmpxchg_enabled;
+#endif
 
 /*
  * Futex flags used to encode options to functions and preserve them across
@@ -2843,9 +2845,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 
-static int __init futex_init(void)
+static void __init futex_detect_cmpxchg(void)
 {
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 	u32 curval;
+
+	/*
+	 * This will fail and we want it. Some arch implementations do
+	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
+	 * functionality. We want to know that before we call in any
+	 * of the complex code paths. Also we want to prevent
+	 * registration of robust lists in that case. NULL is
+	 * guaranteed to fault and we get -EFAULT on functional
+	 * implementation, the non-functional ones will return
+	 * -ENOSYS.
+	 */
+	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+		futex_cmpxchg_enabled = 1;
+#endif
+}
+
+static int __init futex_init(void)
+{
 	unsigned int futex_shift;
 	unsigned long i;
 
@@ -2861,18 +2882,8 @@ static int __init futex_init(void)
 					       &futex_shift, NULL,
 					       futex_hashsize, futex_hashsize);
 	futex_hashsize = 1UL << futex_shift;
-	/*
-	 * This will fail and we want it. Some arch implementations do
-	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
-	 * functionality. We want to know that before we call in any
-	 * of the complex code paths. Also we want to prevent
-	 * registration of robust lists in that case. NULL is
-	 * guaranteed to fault and we get -EFAULT on functional
-	 * implementation, the non-functional ones will return
-	 * -ENOSYS.
-	 */
-	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
-		futex_cmpxchg_enabled = 1;
+
+	futex_detect_cmpxchg();
 
 	for (i = 0; i < futex_hashsize; i++) {
 		plist_head_init(&futex_queues[i].chain);
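
The runtime detection kept for architectures that do not select HAVE_FUTEX_CMPXCHG hinges on the error code: probing with a NULL user address makes a functional implementation attempt the access and report the fault (-EFAULT), while an unimplemented stub bails out first with -ENOSYS. A minimal user-space sketch of the idiom; probe() is a hypothetical stand-in for cmpxchg_futex_value_locked(), with the page fault modelled by an explicit NULL check:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_HAS_REAL_IMPL 1    /* flip to 0 to model a stubbed architecture */

static int probe(uint32_t *curval, uint32_t *uaddr)
{
#if ARCH_HAS_REAL_IMPL
        if (!uaddr)             /* models the fault a real NULL access takes */
                return -EFAULT;
        *curval = *uaddr;
        return 0;
#else
        (void)curval;
        (void)uaddr;
        return -ENOSYS;         /* stub: operation not implemented */
#endif
}

int main(void)
{
        uint32_t curval;
        int enabled = (probe(&curval, NULL) == -EFAULT);

        printf("futex_cmpxchg_enabled = %d\n", enabled);
        return 0;
}

Probing a deliberately bad address keeps the check cheap and side-effect free: either outcome is an error, and only the error value distinguishes "works but faulted" from "not implemented at all".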