Commit 61c9fed4 authored by Vladislav Yasevich, committed by Sridhar Samudrala

[SCTP]: A better solution to fix the race between sctp_peeloff() and sctp_rcv().

The goal is to hold the ref on the association/endpoint throughout the
state-machine process.  We accomplish this as follows:

  /* ref on the assoc/ep is taken during lookup */

  if owned_by_user(sk)
	sctp_add_backlog(skb, sk);
  else
	inqueue_push(skb, sk);

  /* drop the ref on the assoc/ep */

However, in sctp_add_backlog() we take the ref on assoc/ep and hold it
while the skb is on the backlog queue.  This allows us to get rid of the
sock_hold/sock_put in the lookup routines.
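
Condensed from the new helper added in the diff below, taking the ref in
sctp_add_backlog() looks like this (the BUG() on an unknown rcvr type is
omitted here for brevity):

  static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
  {
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_ep_common *rcvr = chunk->rcvr;

	/* Pin the assoc/ep for as long as the skb sits on the backlog,
	 * so the structure cannot disappear before sctp_backlog_rcv()
	 * gets to run.
	 */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_hold(sctp_assoc(rcvr));
	else
		sctp_endpoint_hold(sctp_ep(rcvr));

	sk_add_backlog(sk, skb);
  }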

Now sctp_backlog_rcv() needs to account for a potential association move.
In the unlikely event that the association has moved, we need to retest
whether the new socket is locked by the user.  If we don't do this, we may
have two packets racing up the stack toward the same socket, which we
can't deal with.  If the new socket is still locked, we just add the skb
to its backlog, continuing to hold the ref on the association.  This gets
rid of the need to move packets from one backlog to another, and it is
also safe if new packets arrive on the same backlog queue.
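
In sketch form (condensed from the new sctp_backlog_rcv() in the diff
below; declarations of inqueue and backloged are as in the full function),
the retest on the new socket is:

  if (unlikely(rcvr->sk != sk)) {
	/* The association was peeled off while this skb sat on the
	 * old socket's backlog; switch to the new socket.
	 */
	sk = rcvr->sk;
	sctp_bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sk_add_backlog(sk, skb);	/* ref travels with the skb */
		backloged = 1;
	} else
		sctp_inq_push(inqueue, chunk);
	sctp_bh_unlock_sock(sk);
	if (backloged)
		return 0;	/* don't drop the assoc/ep ref */
  }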

The last step is to lock the new socket while we are moving the
association to it.  This is needed in case any new packets arrive on the
association while it is being moved.  We want these to go to the backlog,
since we would like to avoid a race between this new packet and a packet
that may be sitting on the backlog queue of the old socket toward the
same association.
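
The corresponding change in sctp_sock_migrate() (see the socket.c hunk
below) reduces to:

	/* Make the new socket look "owned by user" so that incoming
	 * packets for the moved association are backlogged instead of
	 * racing with backlog processing on the old socket.
	 */
	sctp_lock_sock(newsk);
	sctp_assoc_migrate(assoc, newsk);
	...
	sctp_release_sock(newsk);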
Signed-off-by: Vladislav Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
parent 8de8c873
diff --git a/net/sctp/input.c b/net/sctp/input.c
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association(
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
 
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
 
 /* Calculate the SCTP checksum of an SCTP packet. */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
 	{
-		sock_put(sk);
 		if (asoc) {
 			sctp_association_put(asoc);
 			asoc = NULL;
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb)
 		sk = sctp_get_ctl_sock();
 		ep = sctp_sk(sk)->ep;
 		sctp_endpoint_hold(ep);
-		sock_hold(sk);
 		rcvr = &ep->base;
 	}
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
-	/* It is possible that the association could have moved to a different
-	 * socket if it is peeled off. If so, update the sk.
-	 */
-	if (sk != rcvr->sk) {
-		sctp_bh_lock_sock(rcvr->sk);
-		sctp_bh_unlock_sock(sk);
-		sk = rcvr->sk;
-	}
-
 	if (sock_owned_by_user(sk))
-		sk_add_backlog(sk, skb);
+		sctp_add_backlog(sk, skb);
 	else
-		sctp_backlog_rcv(sk, skb);
+		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 
-	/* Release the sock and the sock ref we took in the lookup calls.
-	 * The asoc/ep ref will be released in sctp_backlog_rcv.
-	 */
 	sctp_bh_unlock_sock(sk);
-	sock_put(sk);
+
+	/* Release the asoc/ep ref we took in the lookup calls. */
+	if (asoc)
+		sctp_association_put(asoc);
+	else
+		sctp_endpoint_put(ep);
 
 	return 0;
@@ -280,8 +273,7 @@ int sctp_rcv(struct sk_buff *skb)
 	return 0;
 
 discard_release:
-	/* Release any structures we may be holding. */
-	sock_put(sk);
+	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
 		sctp_association_put(asoc);
 	else
@@ -290,56 +282,87 @@ int sctp_rcv(struct sk_buff *skb)
 	goto discard_it;
 }
 
-/* Handle second half of inbound skb processing.  If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog).  If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket.  Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
  */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-	struct sctp_inq *inqueue = NULL;
+	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
 	struct sctp_ep_common *rcvr = NULL;
+	int backloged = 0;
 
 	rcvr = chunk->rcvr;
 
-	BUG_TRAP(rcvr->sk == sk);
-
-	if (rcvr->dead) {
-		sctp_chunk_free(chunk);
-	} else {
-		inqueue = &chunk->rcvr->inqueue;
-		sctp_inq_push(inqueue, chunk);
-	}
-
-	/* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_put(sctp_assoc(rcvr));
-	else
-		sctp_endpoint_put(sctp_ep(rcvr));
+	/* If the rcvr is dead then the association or endpoint
+	 * has been deleted and we can safely drop the chunk
+	 * and the refs that we are holding.
+	 */
+	if (rcvr->dead) {
+		sctp_chunk_free(chunk);
+		goto done;
+	}
+
+	if (unlikely(rcvr->sk != sk)) {
+		/* In this case, the association moved from one socket to
+		 * another.  We are currently sitting on the backlog of the
+		 * old socket, so we need to move.
+		 * However, since we are here in process context, we need
+		 * to make sure that the user doesn't own the new socket
+		 * when we process the packet.
+		 * If the new socket is user-owned, queue the chunk to the
+		 * backlog of the new socket without dropping any refs.
+		 * Otherwise, we can safely push the chunk on the inqueue.
+		 */
+		sk = rcvr->sk;
+		sctp_bh_lock_sock(sk);
+
+		if (sock_owned_by_user(sk)) {
+			sk_add_backlog(sk, skb);
+			backloged = 1;
+		} else
+			sctp_inq_push(inqueue, chunk);
+
+		sctp_bh_unlock_sock(sk);
+
+		/* If the chunk was backloged again, don't drop refs */
+		if (backloged)
+			return 0;
+	} else {
+		sctp_inq_push(inqueue, chunk);
+	}
+
+done:
+	/* Release the refs we took in sctp_add_backlog */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_put(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_put(sctp_ep(rcvr));
+	else
+		BUG();
 
 	return 0;
 }
 
-void sctp_backlog_migrate(struct sctp_association *assoc,
-			  struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_ep_common *rcvr = chunk->rcvr;
 
-	skb = oldsk->sk_backlog.head;
-	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
-	while (skb != NULL) {
-		struct sk_buff *next = skb->next;
-
-		chunk = SCTP_INPUT_CB(skb)->chunk;
-		skb->next = NULL;
-		if (&assoc->base == chunk->rcvr)
-			sk_add_backlog(newsk, skb);
-		else
-			sk_add_backlog(oldsk, skb);
-		skb = next;
-	}
+	/* Hold the assoc/ep while hanging on the backlog queue.
+	 * This way, we know structures we need will not disappear from us.
+	 */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_hold(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_hold(sctp_ep(rcvr));
+	else
+		BUG();
+
+	sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 	return sk;
 
 out:
-	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 	return NULL;
@@ -463,7 +485,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
 	sctp_bh_unlock_sock(sk);
-	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 }
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
 hit:
 	sctp_endpoint_hold(ep);
-	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return ep;
 }
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association(
 hit:
 	*pt = transport;
 	sctp_association_hold(asoc);
-	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return asoc;
 }
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr,
 	struct sctp_transport *transport;
 
 	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
-		sock_put(asoc->base.sk);
 		sctp_association_put(asoc);
 		return 1;
 	}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
@@ -1229,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
-	/* Walk all associations on a socket, not on an endpoint. */
+	/* Walk all associations on an endpoint. */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
@@ -5318,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 	 */
 	sctp_release_sock(sk);
 	current_timeo = schedule_timeout(current_timeo);
+	BUG_ON(sk != asoc->base.sk);
 	sctp_lock_sock(sk);
 
 	*timeo_p = current_timeo;
@@ -5605,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 */
 	newsp->type = type;
 
-	spin_lock_bh(&oldsk->sk_lock.slock);
-
-	/* Migrate the backlog from oldsk to newsk. */
-	sctp_backlog_migrate(assoc, oldsk, newsk);
-
-	/* Migrate the association to the new socket. */
+	/* Mark the new socket "in-use" by the user so that any packets
+	 * that may arrive on the association after we've moved it are
+	 * queued to the backlog.  This prevents a potential race between
+	 * backlog processing on the old socket and new-packet processing
+	 * on the new socket.
+	 */
+	sctp_lock_sock(newsk);
 	sctp_assoc_migrate(assoc, newsk);
-	spin_unlock_bh(&oldsk->sk_lock.slock);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
@@ -5619,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 
 	newsk->sk_shutdown |= RCV_SHUTDOWN;
 	newsk->sk_state = SCTP_SS_ESTABLISHED;
+	sctp_release_sock(newsk);
 }
 
 /* This proto struct describes the ULP interface for SCTP. */