Commit dcc909d9 authored by Markus Pargmann, committed by Jens Axboe

nbd: Add locking for tasks

The timeout handling introduced in
	7e2893a1 (nbd: Fix timeout detection)
introduces a race condition which may lead to killing tasks that are no
longer in nbd context. This has not been observed or reproduced yet.

This patch adds locking around the critical uses of task_recv and
task_send to avoid killing tasks that have already left the NBD thread
functions. The lock is only acquired when a timeout occurs or when the
nbd device starts or stops.
Reported-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Markus Pargmann <mpa@pengutronix.de>
Reviewed-by: Ben Hutchings <ben@decadent.org.uk>
Fixes: 7e2893a1 ("nbd: Fix timeout detection")
Signed-off-by: Jens Axboe <axboe@fb.com>
parent c3984cc9
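The hunks below serialize access to nbd->task_recv and nbd->task_send behind the new tasks_lock spinlock, so the timeout handler only ever signals a task while that task's pointer is still published. As a rough userspace analogy of the same pattern (a sketch in plain C with pthreads; the names worker_registered and timeout_path are illustrative and do not appear in the driver), a worker publishes and clears its identity under a lock, and the timeout path signals it only while holding that lock:

/* Userspace sketch only -- not driver code; names are illustrative. */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker_id;       /* plays the role of nbd->task_recv */
static int worker_registered;     /* non-zero only while the worker runs */

static void *worker(void *arg)
{
	(void)arg;

	/* publish our identity under the lock, like nbd_thread_recv() */
	pthread_mutex_lock(&tasks_lock);
	worker_id = pthread_self();
	worker_registered = 1;
	pthread_mutex_unlock(&tasks_lock);

	sleep(1);                 /* stand-in for the receive loop */

	/* clear it under the same lock before leaving the function */
	pthread_mutex_lock(&tasks_lock);
	worker_registered = 0;
	pthread_mutex_unlock(&tasks_lock);
	return NULL;
}

static void timeout_path(void)
{
	/* like nbd_xmit_timeout(): signal only while the entry is valid */
	pthread_mutex_lock(&tasks_lock);
	if (worker_registered)
		pthread_kill(worker_id, SIGUSR1);  /* SIGKILL in the driver */
	pthread_mutex_unlock(&tasks_lock);
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, SIG_IGN);  /* keep the demo alive when signalled */
	pthread_create(&t, NULL, worker, NULL);
	timeout_path();
	pthread_join(t, NULL);
	puts("done");
	return 0;
}

Built with cc -pthread, the timeout path either sees the registration and signals the live thread, or sees it already cleared and does nothing; it can never signal a thread that has left the worker function, which is the guarantee the driver needs for task_recv and task_send.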
@@ -60,6 +60,7 @@ struct nbd_device {
 	bool disconnect; /* a disconnect has been requested by user */
 
 	struct timer_list timeout_timer;
+	spinlock_t tasks_lock;
 	struct task_struct *task_recv;
 	struct task_struct *task_send;
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
 static void nbd_xmit_timeout(unsigned long arg)
 {
 	struct nbd_device *nbd = (struct nbd_device *)arg;
-	struct task_struct *task;
+	unsigned long flags;
 
 	if (list_empty(&nbd->queue_head))
 		return;
 
 	nbd->disconnect = true;
 
-	task = READ_ONCE(nbd->task_recv);
-	if (task)
-		force_sig(SIGKILL, task);
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
+
+	if (nbd->task_recv)
+		force_sig(SIGKILL, nbd->task_recv);
 
-	task = READ_ONCE(nbd->task_send);
-	if (task)
+	if (nbd->task_send)
 		force_sig(SIGKILL, nbd->task_send);
 
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
 	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
 }
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 {
 	struct request *req;
 	int ret;
+	unsigned long flags;
 
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
 	sk_set_memalloc(nbd->sock->sk);
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_recv = current;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
 	if (ret) {
 		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+
+		spin_lock_irqsave(&nbd->tasks_lock, flags);
 		nbd->task_recv = NULL;
+		spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
 		return ret;
 	}
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 
 	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_recv = NULL;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	if (signal_pending(current)) {
 		siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
 {
 	struct nbd_device *nbd = data;
 	struct request *req;
+	unsigned long flags;
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_send = current;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	set_user_nice(current, MIN_NICE);
 	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
 		nbd_handle_req(nbd, req);
 	}
 
+	spin_lock_irqsave(&nbd->tasks_lock, flags);
 	nbd->task_send = NULL;
+	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
+	/* Clear maybe pending signals */
+	if (signal_pending(current)) {
+		siginfo_t info;
+		dequeue_signal_lock(current, &current->blocked, &info);
+	}
 
 	return 0;
 }
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
 		nbd_dev[i].magic = NBD_MAGIC;
 		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
 		spin_lock_init(&nbd_dev[i].queue_lock);
+		spin_lock_init(&nbd_dev[i].tasks_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		mutex_init(&nbd_dev[i].tx_lock);
 		init_timer(&nbd_dev[i].timeout_timer);