Commit febca281 authored by Ursula Braun, committed by David S. Miller

[AF_IUCV]: Add lock when updating accept_q

The accept_queue of an af_iucv socket will be corrupted, if
adding and deleting of entries in this queue occurs at the
same time (connect request from one client, while accept call
is processed for another client).
Solution: add locking when updating accept_q
Signed-off-by: Ursula Braun <braunu@de.ibm.com>
Acked-by: Frank Pavlic <fpavlic@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 13fdc9a7
@@ -60,6 +60,7 @@ struct iucv_sock {
 	char			dst_user_id[8];
 	char			dst_name[8];
 	struct list_head	accept_q;
+	spinlock_t		accept_q_lock;
 	struct sock		*parent;
 	struct iucv_path	*path;
 	struct sk_buff_head	send_skb_q;
@@ -219,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 	sock_init_data(sock, sk);
 	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
 	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
 	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 	iucv_sk(sk)->send_tag = 0;
@@ -274,15 +275,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(parent);
+
 	sock_hold(sk);
-	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+	spin_lock_irqsave(&par->accept_q_lock, flags);
+	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent = parent;
 	parent->sk_ack_backlog++;
 }

 void iucv_accept_unlink(struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+	spin_lock_irqsave(&par->accept_q_lock, flags);
 	list_del_init(&iucv_sk(sk)->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent->sk_ack_backlog--;
 	iucv_sk(sk)->parent = NULL;
 	sock_put(sk);
@@ -298,8 +309,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 		lock_sock(sk);

 		if (sk->sk_state == IUCV_CLOSED) {
-			release_sock(sk);
 			iucv_accept_unlink(sk);
+			release_sock(sk);
 			continue;
 		}
@@ -879,6 +890,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	/* Find out if this path belongs to af_iucv. */
 	read_lock(&iucv_sk_list.lock);
 	iucv = NULL;
+	sk = NULL;
 	sk_for_each(sk, node, &iucv_sk_list.head)
 		if (sk->sk_state == IUCV_LISTEN &&
 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment