Commit 8f17d3a5 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lightweight robust futexes updates

- fix: initialize the robust list(s) to NULL in copy_process.

- doc update

- cleanup: rename _inuser to _inatomic

- __user cleanups and other small cleanups
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8fdd6c6d
@@ -142,8 +142,6 @@ On insertion:
     of the 'lock word', to the linked list starting at 'head', and
  4) clear the 'list_op_pending' word.
-    XXX I am particularly unsure of the following -pj XXX
-
 On removal:
  1) set the 'list_op_pending' word to the address of the 'lock word'
     to be removed,
......
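For context, the insertion discipline described above can be sketched from userspace roughly as follows. This is a minimal illustration, not part of the patch: the structures mirror the robust-futex ABI in include/linux/futex.h, and the actual futex acquisition (step 2) is elided.

/*
 * Minimal userspace sketch of the insertion protocol above; not part
 * of this patch. Structures mirror the ABI in include/linux/futex.h,
 * the actual lock acquisition is elided.
 */
struct robust_list { struct robust_list *next; };

struct robust_list_head {
	struct robust_list list;		/* anchor of the per-thread list */
	long futex_offset;			/* entry address -> lock word */
	struct robust_list *list_op_pending;	/* (un)lock currently in flight */
};

static struct robust_list_head head = { { &head.list }, 0, NULL };

static void robust_lock_insert(struct robust_list *entry)
{
	head.list_op_pending = entry;	/* 1) announce the pending operation */
	/* 2) acquire the futex word belonging to 'entry' here ... */
	entry->next = head.list.next;	/* 3) link the entry after 'head' */
	head.list.next = entry;
	head.list_op_pending = NULL;	/* 4) clear the pending marker */
}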
@@ -213,6 +213,6 @@ robust-mutex testcases.
 All other architectures should build just fine too - but they wont have
 the new syscalls yet.
 
-Architectures need to implement the new futex_atomic_cmpxchg_inuser()
+Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
 inline function before writing up the syscalls (that function returns
 -ENOSYS right now).
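For architectures filling in the stubs below, the helper's contract is: atomically compare the user-space futex word at uaddr with oldval and, only if they match, store newval; return the value originally found, or -EFAULT on a faulting access. The '_inatomic' suffix signals that it is called with pagefaults disabled. A conceptual C sketch of the return convention only (deliberately NOT a valid implementation: it is neither atomic nor legal in pagefault-disabled context; real implementations use a cmpxchg-style instruction plus an exception-table fixup):

/*
 * Conceptual sketch of the expected return convention only.
 * NOT atomic and NOT usable with pagefaults disabled as written.
 */
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
	int cur;

	if (get_user(cur, uaddr))
		return -EFAULT;
	if (cur == oldval && put_user(newval, uaddr))
		return -EFAULT;
	return cur;	/* caller retries if this != oldval */
}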
@@ -10,7 +10,7 @@
 extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }
......
@@ -50,7 +50,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }
......
@@ -105,7 +105,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
......
@@ -100,7 +100,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }
......
@@ -82,7 +82,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }
......
@@ -95,7 +95,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
......
@@ -100,7 +100,7 @@ long do_futex(unsigned long uaddr, int op, int val,
 	      unsigned long timeout, unsigned long uaddr2, int val2,
 	      int val3);
 
-extern int handle_futex_death(unsigned int *uaddr, struct task_struct *curr);
+extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
......
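The change from 'unsigned int *' to 'u32 __user *' is not cosmetic: under sparse ('make C=1') the __user annotation marks the pointer as living in the user address space, so a direct dereference is now flagged. A hypothetical misuse to illustrate (not from this patch):

/* Hypothetical example of what sparse now catches thanks to __user: */
static int peek(u32 __user *uaddr)
{
	return *uaddr;	/* sparse: "dereference of noderef expression" */
}
/* Correct code goes through get_user()/put_user() or the *_inatomic()
 * helpers instead. */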
@@ -1061,7 +1061,10 @@ static task_t *copy_process(unsigned long clone_flags,
 	 * Clear TID on mm_release()?
 	 */
 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
-
+	p->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+	p->compat_robust_list = NULL;
+#endif
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
 	 */
......
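Without this initialization a child created by fork()/clone() would inherit the parent's robust_list pointer, and at child exit the kernel would walk the parent's list through the child's address space, potentially marking locks the child never held. Registration is strictly per-thread; a hedged userspace example of what each new thread must do for itself (illustrative only, not part of this patch; macro and header names as in the new ABI):

/*
 * Per-thread registration example (illustrative, not from this patch).
 * Children start with robust_list == NULL after this fix, so each new
 * thread/process must register its own head (in real code this would
 * be thread-local, e.g. __thread).
 */
#include <linux/futex.h>	/* struct robust_list_head */
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head head = {
	.list		 = { &head.list },	/* empty circular list */
	.futex_offset	 = 0,
	.list_op_pending = NULL,
};

static long register_robust_list(void)
{
	return syscall(__NR_set_robust_list, &head, sizeof(head));
}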
@@ -913,15 +913,15 @@ sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
  * Process a futex-list entry, check whether it's owned by the
  * dying task, and do notification if so:
  */
-int handle_futex_death(unsigned int *uaddr, struct task_struct *curr)
+int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
 {
-	unsigned int futex_val;
+	u32 uval;
 
-repeat:
-	if (get_user(futex_val, uaddr))
+retry:
+	if (get_user(uval, uaddr))
 		return -1;
 
-	if ((futex_val & FUTEX_TID_MASK) == curr->pid) {
+	if ((uval & FUTEX_TID_MASK) == curr->pid) {
 		/*
 		 * Ok, this dying thread is truly holding a futex
 		 * of interest. Set the OWNER_DIED bit atomically
@@ -932,12 +932,11 @@ int handle_futex_death(unsigned int *uaddr, struct task_struct *curr)
 		 * thread-death.) The rest of the cleanup is done in
 		 * userspace.
 		 */
-		if (futex_atomic_cmpxchg_inuser(uaddr, futex_val,
-				futex_val | FUTEX_OWNER_DIED) !=
-						futex_val)
-			goto repeat;
+		if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
+				uval | FUTEX_OWNER_DIED) != uval)
+			goto retry;
 
-		if (futex_val & FUTEX_WAITERS)
+		if (uval & FUTEX_WAITERS)
 			futex_wake((unsigned long)uaddr, 1);
 	}
 	return 0;
@@ -985,7 +984,6 @@ void exit_robust_list(struct task_struct *curr)
 		if (handle_futex_death((void *)entry + futex_offset,
 					curr))
 			return;
-
 		/*
 		 * Fetch the next entry in the list:
 		 */
......
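For reference when reading handle_futex_death() above: the robust-futex word packs the holder's TID and two status bits, and the cmpxchg retry covers the case where another thread changes the word between the get_user() and the atomic update. The bit layout, as defined in linux/futex.h:

#define FUTEX_WAITERS		0x80000000	/* bit 31: waiters exist */
#define FUTEX_OWNER_DIED	0x40000000	/* bit 30: set here on owner death */
#define FUTEX_TID_MASK		0x3fffffff	/* bits 0-29: TID of the holder */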
@@ -121,9 +121,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
 	return ret;
 }
 
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 		struct compat_timespec __user *utime, u32 __user *uaddr2,
-		int val3)
+		u32 val3)
 {
 	struct timespec t;
 	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
@@ -137,6 +137,5 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
 	if (op >= FUTEX_REQUEUE)
 		val2 = (int) (unsigned long) utime;
 
-	return do_futex((unsigned long)uaddr, op, val, timeout,
-		(unsigned long)uaddr2, val2, val3);
+	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
 }