Commit ab51ec6b authored by Martijn Coenen's avatar Martijn Coenen Committed by Greg Kroah-Hartman

binder: fix death race conditions

A race existed where one thread could register
a death notification for a node, while another
thread was cleaning up that node and sending
out death notifications for its references,
causing simultaneous access to ref->death
because different locks were held.
Signed-off-by: Martijn Coenen <maco@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5f2f6369
...@@ -442,6 +442,7 @@ struct binder_ref_data { ...@@ -442,6 +442,7 @@ struct binder_ref_data {
* ref for deletion in binder_cleanup_ref, a non-NULL * ref for deletion in binder_cleanup_ref, a non-NULL
* @node indicates the node must be freed * @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested * @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
* *
* Structure to track references from procA to target node (on procB). This * Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock. * structure is unsafe to access without holding @proc->outer_lock.
...@@ -3337,10 +3338,12 @@ static int binder_thread_write(struct binder_proc *proc, ...@@ -3337,10 +3338,12 @@ static int binder_thread_write(struct binder_proc *proc,
ref->data.desc, ref->data.strong, ref->data.desc, ref->data.strong,
ref->data.weak, ref->node->debug_id); ref->data.weak, ref->node->debug_id);
binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) { if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid); proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc); binder_proc_unlock(proc);
kfree(death); kfree(death);
break; break;
...@@ -3349,7 +3352,6 @@ static int binder_thread_write(struct binder_proc *proc, ...@@ -3349,7 +3352,6 @@ static int binder_thread_write(struct binder_proc *proc,
INIT_LIST_HEAD(&death->work.entry); INIT_LIST_HEAD(&death->work.entry);
death->cookie = cookie; death->cookie = cookie;
ref->death = death; ref->death = death;
binder_node_lock(ref->node);
if (ref->node->proc == NULL) { if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER; ref->death->work.type = BINDER_WORK_DEAD_BINDER;
if (thread->looper & if (thread->looper &
...@@ -3368,9 +3370,7 @@ static int binder_thread_write(struct binder_proc *proc, ...@@ -3368,9 +3370,7 @@ static int binder_thread_write(struct binder_proc *proc,
&proc->wait); &proc->wait);
} }
} }
binder_node_unlock(ref->node);
} else { } else {
binder_node_lock(ref->node);
if (ref->death == NULL) { if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid); proc->pid, thread->pid);
...@@ -3410,8 +3410,8 @@ static int binder_thread_write(struct binder_proc *proc, ...@@ -3410,8 +3410,8 @@ static int binder_thread_write(struct binder_proc *proc,
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
} }
binder_inner_proc_unlock(proc); binder_inner_proc_unlock(proc);
binder_node_unlock(ref->node);
} }
binder_node_unlock(ref->node);
binder_proc_unlock(proc); binder_proc_unlock(proc);
} break; } break;
case BC_DEAD_BINDER_DONE: { case BC_DEAD_BINDER_DONE: {
...@@ -3748,44 +3748,39 @@ static int binder_thread_read(struct binder_proc *proc, ...@@ -3748,44 +3748,39 @@ static int binder_thread_read(struct binder_proc *proc,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death; struct binder_ref_death *death;
uint32_t cmd; uint32_t cmd;
binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work); death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else else
cmd = BR_DEAD_BINDER; cmd = BR_DEAD_BINDER;
/* cookie = death->cookie;
* TODO: there is a race condition between
* death notification requests and delivery
* of the notifications. This will be handled
* in a later patch.
*/
binder_inner_proc_unlock(proc);
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(death->cookie,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n", "%d:%d %s %016llx\n",
proc->pid, thread->pid, proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ? cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" : "BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE", "BR_CLEAR_DEATH_NOTIFICATION_DONE",
(u64)death->cookie); (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
binder_inner_proc_unlock(proc);
kfree(death); kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH); binder_stats_deleted(BINDER_STAT_DEATH);
} else { } else {
binder_inner_proc_lock(proc);
binder_enqueue_work_ilocked( binder_enqueue_work_ilocked(
w, &proc->delivered_death); w, &proc->delivered_death);
binder_inner_proc_unlock(proc); binder_inner_proc_unlock(proc);
} }
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(cookie,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER) if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */ goto done; /* DEAD_BINDER notifications can cause transactions */
} break; } break;
...@@ -4535,20 +4530,25 @@ static int binder_node_release(struct binder_node *node, int refs) ...@@ -4535,20 +4530,25 @@ static int binder_node_release(struct binder_node *node, int refs)
hlist_for_each_entry(ref, &node->refs, node_entry) { hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++; refs++;
/*
if (!ref->death) * Need the node lock to synchronize
* with new notification requests and the
* inner lock to synchronize with queued
* death notifications.
*/
binder_inner_proc_lock(ref->proc);
if (!ref->death) {
binder_inner_proc_unlock(ref->proc);
continue; continue;
}
death++; death++;
binder_inner_proc_lock(ref->proc); BUG_ON(!list_empty(&ref->death->work.entry));
if (list_empty(&ref->death->work.entry)) { ref->death->work.type = BINDER_WORK_DEAD_BINDER;
ref->death->work.type = BINDER_WORK_DEAD_BINDER; binder_enqueue_work_ilocked(&ref->death->work,
binder_enqueue_work_ilocked(&ref->death->work, &ref->proc->todo);
&ref->proc->todo); wake_up_interruptible(&ref->proc->wait);
wake_up_interruptible(&ref->proc->wait);
} else
BUG();
binder_inner_proc_unlock(ref->proc); binder_inner_proc_unlock(ref->proc);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment