Commit 78398488 authored by Sridhar Samudrala

Merge us.ibm.com:/home/sridhar/BK/linux-2.6.0-test1
into us.ibm.com:/home/sridhar/BK/lksctp-2.5.75
parents 8916e8f1 c15987c4
@@ -231,7 +231,8 @@ struct sctp_chunk *sctp_make_data_empty(struct sctp_association *,
 struct sctp_chunk *sctp_make_ecne(const struct sctp_association *,
                                   const __u32);
 struct sctp_chunk *sctp_make_sack(const struct sctp_association *);
-struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+                                      const struct sctp_chunk *chunk);
 struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
                                           const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
......
@@ -40,6 +40,7 @@
  * Jon Grimm <jgrimm@us.ibm.com>
  * La Monte H.P. Yarroll <piggy@acm.org>
  * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
  *
  * Any bugs reported given to us we will try to fix... any fixes shared will
  * be incorporated into the next SCTP release.
@@ -72,9 +73,10 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 }

 struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int gfp);
-struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
+void sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
+void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
        const struct sctp_association *asoc,
......
@@ -877,7 +877,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
        /* Delete the association from the old endpoint's list of
         * associations.
         */
-       list_del(&assoc->asocs);
+       list_del_init(&assoc->asocs);

        /* Decrement the backlog value for a TCP-style socket. */
        if (sctp_style(oldsk, TCP))
......
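Note: the list_del() -> list_del_init() substitution above recurs in the sctp_datamsg_destroy(), sctp_outq_teardown(), sctp_chunk_free() and sctp_process_init() hunks below, because these entries may be inspected or unlinked again after removal. A minimal illustrative sketch, not taken from the patch (the helper name is invented), of why the _init variant is the safer teardown primitive:

#include <linux/list.h>

/* Illustration only.  list_del() leaves the entry's next/prev pointers
 * poisoned, so a later list_empty() test or a second unlink on the same
 * node is undefined; list_del_init() re-initializes the node to point at
 * itself, so both stay well defined.
 */
static void example_unlink(struct list_head *entry)
{
        list_del_init(entry);           /* entry is now an empty list */

        if (list_empty(entry)) {        /* true, and safe to test */
                /* The entry may now be freed, re-queued, or unlinked
                 * again (e.g. by a later sctp_chunk_free()) without
                 * touching poisoned pointers.
                 */
        }
}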
@@ -85,7 +85,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
        /* Release all references. */
        list_for_each_safe(pos, temp, &msg->chunks) {
-               list_del(pos);
+               list_del_init(pos);
                chunk = list_entry(pos, struct sctp_chunk, frag_list);

                /* Check whether we _really_ need to notify. */
                if (notify < 0) {
@@ -294,7 +294,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 errout:
        list_for_each_safe(pos, temp, &msg->chunks) {
-               list_del(pos);
+               list_del_init(pos);
                chunk = list_entry(pos, struct sctp_chunk, frag_list);
                sctp_chunk_free(chunk);
        }
......
@@ -97,6 +97,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
        packet->source_port = sport;
        packet->destination_port = dport;
        skb_queue_head_init(&packet->chunks);
+       packet->size = SCTP_IP_OVERHEAD;
        packet->vtag = 0;
        packet->ecn_capable = 0;
        packet->get_prepend_chunk = NULL;
@@ -219,9 +220,8 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
        /* Both control chunks and data chunks with TSNs are
         * non-fragmentable.
         */
-       if (packet_empty) {
-               /* We no longer do refragmentation at all.
+       if (packet_empty || !sctp_chunk_is_data(chunk)) {
+               /* We no longer do re-fragmentation.
                 * Just fragment at the IP layer, if we
                 * actually hit this condition
                 */
@@ -229,7 +229,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
                packet->ipfragok = 1;
                goto append;
-       } else { /* !packet_empty */
+       } else {
                retval = SCTP_XMIT_PMTU_FULL;
                goto finish;
        }
@@ -283,20 +283,18 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        __u8 has_data = 0;
        struct dst_entry *dst;

-       /* Do NOT generate a chunkless packet... */
-       if (skb_queue_empty(&packet->chunks))
+       /* Do NOT generate a chunkless packet. */
+       chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
+       if (unlikely(!chunk))
                return err;

        /* Set up convenience variables... */
-       chunk = (struct sctp_chunk *) (packet->chunks.next);
        sk = chunk->skb->sk;

        /* Allocate the new skb. */
        nskb = dev_alloc_skb(packet->size);
-       if (!nskb) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!nskb)
+               goto nomem;

        /* Make sure the outbound skb has enough header room reserved. */
        skb_reserve(nskb, SCTP_IP_OVERHEAD);
@@ -468,9 +466,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        if (!nskb->dst)
                goto no_route;

-       SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb length %d\n",
+       SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
                          nskb->len);

        (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);

 out:
        packet->size = SCTP_IP_OVERHEAD;
        return err;
@@ -486,7 +486,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * required.
         */
        /* err = -EHOSTUNREACH; */
+err:
+       /* Control chunks are unreliable so just drop them.  DATA chunks
+        * will get resent or dropped later.
+        */
+       while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
+               if (!sctp_chunk_is_data(chunk))
+                       sctp_chunk_free(chunk);
+       }
        goto out;
+
+nomem:
+       err = -ENOMEM;
+       printk("%s alloc_skb failed.\n", __FUNCTION__);
+       goto err;
 }

 /********************************************************************
......
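Two behaviours in the hunks above are worth spelling out. In sctp_packet_append_chunk(), an over-sized chunk is now accepted only when the packet is still empty or the chunk is a control chunk (IP fragmentation is then permitted); an over-sized DATA chunk appended to a non-empty packet is refused with SCTP_XMIT_PMTU_FULL. And in sctp_packet_transmit(), the new err: path frees only control chunks, since DATA chunks are still tracked elsewhere for retransmission. A hedged sketch of the first rule, using an invented helper name:

/* Illustration only; the real logic lives in sctp_packet_append_chunk().
 * 'packet_empty' and 'is_data' mirror the values used there.
 */
static int demo_accept_oversized(int packet_empty, int is_data)
{
        /* An over-sized first chunk, or any control chunk, is appended
         * and packet->ipfragok is set so the IP layer may fragment it.
         */
        if (packet_empty || !is_data)
                return 1;

        /* A DATA chunk that does not fit an already-started packet is
         * rejected (SCTP_XMIT_PMTU_FULL) so the caller flushes first.
         */
        return 0;
}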
@@ -258,7 +258,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
        /* Throw away chunks that have been gap ACKed. */
        list_for_each_safe(lchunk, temp, &q->sacked) {
-               list_del(lchunk);
+               list_del_init(lchunk);
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);
                sctp_datamsg_fail(chunk, q->error);
@@ -267,7 +267,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
        /* Throw away any chunks in the retransmit queue. */
        list_for_each_safe(lchunk, temp, &q->retransmit) {
-               list_del(lchunk);
+               list_del_init(lchunk);
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);
                sctp_datamsg_fail(chunk, q->error);
@@ -445,7 +445,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                        /* Move the chunk to the retransmit queue. The chunks
                         * on the retransmit queue is always kept in order.
                         */
-                       list_del(lchunk);
+                       list_del_init(lchunk);
                        sctp_retransmit_insert(lchunk, q);
                }
        }
@@ -1007,7 +1007,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
        struct sctp_association *asoc = q->asoc;
        struct sctp_transport *transport;
        struct sctp_chunk *tchunk;
-       struct list_head *lchunk, *transport_list, *pos;
+       struct list_head *lchunk, *transport_list, *pos, *temp;
        sctp_sack_variable_t *frags = sack->variable;
        __u32 sack_ctsn, ctsn, tsn;
        __u32 highest_tsn, highest_new_tsn;
@@ -1115,14 +1115,12 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
                          "%p is 0x%x.\n", __FUNCTION__, asoc, ctsn);

        /* Throw away stuff rotting on the sack queue. */
-       list_for_each(lchunk, &q->sacked) {
+       list_for_each_safe(lchunk, temp, &q->sacked) {
                tchunk = list_entry(lchunk, struct sctp_chunk,
                                    transmitted_list);
                tsn = ntohl(tchunk->subh.data_hdr->tsn);
-               if (TSN_lte(tsn, ctsn)) {
-                       lchunk = lchunk->prev;
+               if (TSN_lte(tsn, ctsn))
                        sctp_chunk_free(tchunk);
-               }
        }

        /* ii) Set rwnd equal to the newly received a_rwnd minus the
......
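The sctp_outq_sack() loop above previously compensated for in-loop deletion by rewinding lchunk->prev by hand before freeing the chunk; switching to list_for_each_safe() with the new temp cursor removes that fragile step. A small sketch of the pattern with hypothetical types (not from the patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_chunk {
        struct list_head transmitted_list;
        u32 tsn;
};

/* Free every entry whose TSN is at or below ctsn.  The _safe iterator
 * caches the next node before the body runs, so freeing the current
 * entry is fine.  (The real code compares with TSN_lte() to handle
 * serial-number wraparound.)
 */
static void demo_purge_sacked(struct list_head *sacked, u32 ctsn)
{
        struct list_head *lchunk, *temp;
        struct demo_chunk *tchunk;

        list_for_each_safe(lchunk, temp, sacked) {
                tchunk = list_entry(lchunk, struct demo_chunk, transmitted_list);
                if (tchunk->tsn <= ctsn) {
                        list_del_init(lchunk);
                        kfree(tchunk);
                }
        }
}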
@@ -667,7 +667,8 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
 }

 /* Make a SHUTDOWN chunk. */
-struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc)
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+                                      const struct sctp_chunk *chunk)
 {
        struct sctp_chunk *retval;
        sctp_shutdownhdr_t shut;
@@ -683,6 +684,9 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc)
        retval->subh.shutdown_hdr =
                sctp_addto_chunk(retval, sizeof(shut), &shut);

+       if (chunk)
+               retval->transport = chunk->transport;
+
 nodata:
        return retval;
 }
@@ -1089,7 +1093,7 @@ void sctp_chunk_free(struct sctp_chunk *chunk)
 {
        /* Make sure that we are not on any list. */
        skb_unlink((struct sk_buff *) chunk);
-       list_del(&chunk->transmitted_list);
+       list_del_init(&chunk->transmitted_list);

        /* Release our reference on the message tracker. */
        if (chunk->msg)
@@ -1850,7 +1854,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
        /* Release the transport structures. */
        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                transport = list_entry(pos, struct sctp_transport, transports);
-               list_del(pos);
+               list_del_init(pos);
                sctp_transport_free(transport);
        }
 nomem:
......
@@ -962,7 +962,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
                        asoc->overall_error_count = 0;

                        /* Generate a SHUTDOWN chunk. */
-                       new_obj = sctp_make_shutdown(asoc);
+                       new_obj = sctp_make_shutdown(asoc, chunk);
                        if (!new_obj)
                                goto nomem;
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
......
@@ -3862,7 +3862,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
         * in the Cumulative TSN Ack field the last sequential TSN it
         * has received from the peer.
         */
-       reply = sctp_make_shutdown(asoc);
+       reply = sctp_make_shutdown(asoc, NULL);
        if (!reply)
                goto nomem;
@@ -4179,7 +4179,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
        switch (asoc->state) {
        case SCTP_STATE_SHUTDOWN_SENT:
-               reply = sctp_make_shutdown(asoc);
+               reply = sctp_make_shutdown(asoc, NULL);
                break;

        case SCTP_STATE_SHUTDOWN_ACK_SENT:
......
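The call sites above show the intent of the new sctp_make_shutdown() signature: when the state machine is running on a received chunk, that chunk is passed in and sctp_make_shutdown() copies its transport into the SHUTDOWN reply, so the reply goes back out on the path the trigger arrived on; timer-driven paths such as sctp_sf_t2_timer_expire() pass NULL and fall back to the association's default transport selection. A minimal sketch of the calling convention (the wrapper name is invented for illustration):

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Illustration only: 'trigger' may be NULL, e.g. on T2-shutdown expiry. */
static struct sctp_chunk *demo_build_shutdown(struct sctp_association *asoc,
                                              const struct sctp_chunk *trigger)
{
        /* With a non-NULL trigger, the reply inherits trigger->transport. */
        return sctp_make_shutdown(asoc, trigger);
}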
@@ -768,8 +768,8 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
        }

        /* Clean up any skbs sitting on the receive queue. */
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sctp_sk(sk)->pd_lobby);
+       sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+       sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

        /* On a TCP-style socket, block for at most linger_time if set. */
        if (sctp_style(sk, TCP) && timeout)
@@ -1342,8 +1342,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
                /* When only partial message is copied to the user, increase
                 * rwnd by that amount. If all the data in the skb is read,
-                * rwnd is updated when the skb's destructor is called via
-                * sctp_ulpevent_free().
+                * rwnd is updated when the event is freed.
                 */
                sctp_assoc_rwnd_increase(event->asoc, copied);
                goto out;
@@ -1354,7 +1353,18 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
                msg->msg_flags &= ~MSG_EOR;

 out_free:
-       sctp_ulpevent_free(event); /* Free the skb. */
+       if (flags & MSG_PEEK) {
+               /* Release the skb reference acquired after peeking the skb in
+                * sctp_skb_recv_datagram().
+                */
+               kfree_skb(skb);
+       } else {
+               /* Free the event which includes releasing the reference to
+                * the owner of the skb, freeing the skb and updating the
+                * rwnd.
+                */
+               sctp_ulpevent_free(event);
+       }
 out:
        sctp_release_sock(sk);
        return err;
......
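The sctp_recvmsg() change above splits cleanup by MSG_PEEK: a peeked message only drops the extra skb reference taken via sctp_skb_recv_datagram(), while a consumed message is released through sctp_ulpevent_free(), which now also gives up the association reference and returns the receive-window credit. A simplified sketch of the rule (not the real function; locking and error handling are omitted):

#include <net/sctp/sctp.h>

/* Illustration only. */
static void demo_recv_cleanup(struct sctp_ulpevent *event, struct sk_buff *skb,
                              int flags)
{
        if (flags & MSG_PEEK)
                kfree_skb(skb);                 /* drop the peek reference only */
        else
                sctp_ulpevent_free(event);      /* rwnd credit + assoc ref + skb */
}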
@@ -117,6 +117,7 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        INIT_LIST_HEAD(&peer->transmitted);
        INIT_LIST_HEAD(&peer->send_ready);
        INIT_LIST_HEAD(&peer->transports);
+       sctp_packet_init(&peer->packet, peer, 0, 0);

        /* Set up the retransmission timer. */
        init_timer(&peer->T3_rtx_timer);
......
@@ -35,6 +35,7 @@
  * Written or modified by:
  * Jon Grimm <jgrimm@us.ibm.com>
  * La Monte H.P. Yarroll <piggy@acm.org>
+ * Sridhar Samudrala <sri@us.ibm.com>
  *
  * Any bugs reported given to us we will try to fix... any fixes shared will
  * be incorporated into the next SCTP release.
@@ -46,10 +47,12 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>

-static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
-                                      struct sctp_association *asoc);
-static void sctp_ulpevent_set_owner(struct sk_buff *skb,
-                                    const struct sctp_association *asoc);
+static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
+                                           const struct sctp_association *asoc);
+static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event);
+static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
+                                       struct sctp_association *asoc);
+static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);

 /* Create a new sctp_ulpevent. */
 struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int gfp)
@@ -62,30 +65,19 @@ struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int gfp)
                goto fail;

        event = sctp_skb2event(skb);
-       event = sctp_ulpevent_init(event, msg_flags);
-       if (!event)
-               goto fail_init;
+       sctp_ulpevent_init(event, msg_flags);

        return event;

-fail_init:
-       kfree_skb(skb);
 fail:
        return NULL;
 }

 /* Initialize an ULP event from an given skb. */
-struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *event,
-                                        int msg_flags)
+void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
 {
        memset(event, sizeof(struct sctp_ulpevent), 0x00);
        event->msg_flags = msg_flags;
-       return event;
-}
-
-/* Dispose of an event. */
-void sctp_ulpevent_free(struct sctp_ulpevent *event)
-{
-       kfree_skb(sctp_event2skb(event));
 }

 /* Is this a MSG_NOTIFICATION? */
@@ -189,7 +181,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
         * All notifications for a given association have the same association
         * identifier. For TCP style socket, this field is ignored.
         */
-       sctp_ulpevent_set_owner(skb, asoc);
+       sctp_ulpevent_set_owner(event, asoc);
        sac->sac_assoc_id = sctp_assoc2id(asoc);

        return event;
@@ -281,7 +273,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
         * All notifications for a given association have the same association
         * identifier. For TCP style socket, this field is ignored.
         */
-       sctp_ulpevent_set_owner(skb, asoc);
+       sctp_ulpevent_set_owner(event, asoc);
        spc->spc_assoc_id = sctp_assoc2id(asoc);

        /* Sockets API Extensions for SCTP
@@ -336,7 +328,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        /* Copy the skb to a new skb with room for us to prepend
         * notification with.
         */
        skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
                              0, gfp);

        /* Pull off the rest of the cause TLV from the chunk. */
@@ -346,10 +338,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        /* Embed the event fields inside the cloned skb. */
        event = sctp_skb2event(skb);
-       event = sctp_ulpevent_init(event, MSG_NOTIFICATION);
-       if (!event)
-               goto fail;
+       sctp_ulpevent_init(event, MSG_NOTIFICATION);

        sre = (struct sctp_remote_error *)
                skb_push(skb, sizeof(struct sctp_remote_error));
@@ -402,8 +391,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
         * All notifications for a given association have the same association
         * identifier. For TCP style socket, this field is ignored.
         */
-       skb = sctp_event2skb(event);
-       sctp_ulpevent_set_owner(skb, asoc);
+       sctp_ulpevent_set_owner(event, asoc);
        sre->sre_assoc_id = sctp_assoc2id(asoc);

        return event;
@@ -442,9 +430,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
        /* Embed the event fields inside the cloned skb. */
        event = sctp_skb2event(skb);
-       event = sctp_ulpevent_init(event, MSG_NOTIFICATION);
-       if (!event)
-               goto fail;
+       sctp_ulpevent_init(event, MSG_NOTIFICATION);

        ssf = (struct sctp_send_failed *)
                skb_push(skb, sizeof(struct sctp_send_failed));
@@ -502,7 +488,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
        memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));

        /* Per TSVWG discussion with Randy. Allow the application to
         * ressemble a fragmented message.
         */
        ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
@@ -515,8 +501,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
         * same association identifier. For TCP style socket, this field is
         * ignored.
         */
-       skb = sctp_event2skb(event);
-       sctp_ulpevent_set_owner(skb, asoc);
+       sctp_ulpevent_set_owner(event, asoc);
        ssf->ssf_assoc_id = sctp_assoc2id(asoc);

        return event;
@@ -579,7 +564,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
         * All notifications for a given association have the same association
         * identifier. For TCP style socket, this field is ignored.
         */
-       sctp_ulpevent_set_owner(skb, asoc);
+       sctp_ulpevent_set_owner(event, asoc);
        sse->sse_assoc_id = sctp_assoc2id(asoc);

        return event;
@@ -596,12 +581,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
  * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
  */
 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
                                                 struct sctp_chunk *chunk,
                                                 int gfp)
 {
        struct sctp_ulpevent *event;
        struct sctp_sndrcvinfo *info;
-       struct sk_buff *skb, *list;
+       struct sk_buff *skb;
        size_t padding, len;

        /* Clone the original skb, sharing the data. */
@@ -627,24 +612,15 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        /* Fixup cloned skb with just this chunks data. */
        skb_trim(skb, chunk->chunk_end - padding - skb->data);

-       /* Set up a destructor to do rwnd accounting. */
-       sctp_ulpevent_set_owner_r(skb, asoc);
-
        /* Embed the event fields inside the cloned skb. */
        event = sctp_skb2event(skb);

        /* Initialize event with flags 0. */
-       event = sctp_ulpevent_init(event, 0);
-       if (!event)
-               goto fail_init;
+       sctp_ulpevent_init(event, 0);

        event->iif = sctp_chunk_iif(chunk);

-       /* Note: Not clearing the entire event struct as
-        * this is just a fragment of the real event. However,
-        * we still need to do rwnd accounting.
-        */
-       for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
-               sctp_ulpevent_set_owner_r(list, asoc);
+       sctp_ulpevent_receive_data(event, asoc);

        info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo;
@@ -735,9 +711,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        return event;

-fail_init:
-       kfree_skb(skb);
-
 fail:
        return NULL;
 }
@@ -793,6 +766,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
         *
         * The association id field, holds the identifier for the association.
         */
+       sctp_ulpevent_set_owner(event, asoc);
        pd->pdapi_assoc_id = sctp_assoc2id(asoc);

        return event;
@@ -824,69 +798,114 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
        }
 }

-/* Do accounting for bytes just read by user. */
-static void sctp_rcvmsg_rfree(struct sk_buff *skb)
-{
-       struct sctp_association *asoc;
-       struct sctp_ulpevent *event;
-
-       /* Current stack structures assume that the rcv buffer is
-        * per socket. For UDP style sockets this is not true as
-        * multiple associations may be on a single UDP-style socket.
-        * Use the local private area of the skb to track the owning
-        * association.
-        */
-       event = sctp_skb2event(skb);
-       asoc = event->asoc;
-       sctp_assoc_rwnd_increase(asoc, skb_headlen(skb));
-       sctp_association_put(asoc);
-}
-
-/* Charge receive window for bytes received. */
-static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
-                                      struct sctp_association *asoc)
-{
-       struct sctp_ulpevent *event;
-
-       /* The current stack structures assume that the rcv buffer is
-        * per socket. For UDP-style sockets this is not true as
-        * multiple associations may be on a single UDP-style socket.
-        * We use the local private area of the skb to track the owning
-        * association.
-        */
-       sctp_association_hold(asoc);
-       skb->sk = asoc->base.sk;
-       event = sctp_skb2event(skb);
-       event->asoc = asoc;
-       skb->destructor = sctp_rcvmsg_rfree;
-       sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
-}
-
-/* A simple destructor to give up the reference to the association. */
-static void sctp_ulpevent_rfree(struct sk_buff *skb)
-{
-       struct sctp_ulpevent *event;
-
-       event = sctp_skb2event(skb);
-       sctp_association_put(event->asoc);
-}
-
-/* Hold the association in case the msg_name needs read out of
- * the association.
- */
-static void sctp_ulpevent_set_owner(struct sk_buff *skb,
-                                   const struct sctp_association *asoc)
-{
-       struct sctp_ulpevent *event;
-
-       /* Cast away the const, as we are just wanting to
-        * bump the reference count.
-        */
-       sctp_association_hold((struct sctp_association *)asoc);
-       skb->sk = asoc->base.sk;
-       event = sctp_skb2event(skb);
-       event->asoc = (struct sctp_association *)asoc;
-       skb->destructor = sctp_ulpevent_rfree;
-}
+/* Stub skb destructor. */
+static void sctp_stub_rfree(struct sk_buff *skb)
+{
+/* WARNING: This function is just a warning not to use the
+ * skb destructor. If the skb is shared, we may get the destructor
+ * callback on some processor that does not own the sock_lock. This
+ * was occuring with PACKET socket applications that were monitoring
+ * our skbs. We can't take the sock_lock, because we can't risk
+ * recursing if we do really own the sock lock. Instead, do all
+ * of our rwnd manipulation while we own the sock_lock outright.
+ */
+}
+
+/* Hold the association in case the msg_name needs read out of
+ * the association.
+ */
+static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
+                                           const struct sctp_association *asoc)
+{
+       struct sk_buff *skb;
+
+       /* Cast away the const, as we are just wanting to
+        * bump the reference count.
+        */
+       sctp_association_hold((struct sctp_association *)asoc);
+       skb = sctp_event2skb(event);
+       skb->sk = asoc->base.sk;
+       event->asoc = (struct sctp_association *)asoc;
+       skb->destructor = sctp_stub_rfree;
+}
+
+/* A simple destructor to give up the reference to the association. */
+static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
+{
+       sctp_association_put(event->asoc);
+}
+
+/* Do accounting for bytes received and hold a reference to the association
+ * for each skb.
+ */
+static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
+                                       struct sctp_association *asoc)
+{
+       struct sk_buff *skb, *frag;
+
+       skb = sctp_event2skb(event);
+       /* Set the owner and charge rwnd for bytes received. */
+       sctp_ulpevent_set_owner(event, asoc);
+       sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+
+       /* Note: Not clearing the entire event struct as this is just a
+        * fragment of the real event. However, we still need to do rwnd
+        * accounting.
+        * In general, the skb passed from IP can have only 1 level of
+        * fragments. But we allow multiple levels of fragments.
+        */
+       for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
+               sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
+       }
+}
+
+/* Do accounting for bytes just read by user and release the references to
+ * the association.
+ */
+static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
+{
+       struct sk_buff *skb, *frag;
+
+       /* Current stack structures assume that the rcv buffer is
+        * per socket. For UDP style sockets this is not true as
+        * multiple associations may be on a single UDP-style socket.
+        * Use the local private area of the skb to track the owning
+        * association.
+        */
+       skb = sctp_event2skb(event);
+       sctp_assoc_rwnd_increase(event->asoc, skb_headlen(skb));
+
+       /* Don't forget the fragments. */
+       for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
+               /* NOTE: skb_shinfos are recursive. Although IP returns
+                * skb's with only 1 level of fragments, SCTP reassembly can
+                * increase the levels.
+                */
+               sctp_ulpevent_release_data(sctp_skb2event(frag));
+       }
+
+       sctp_ulpevent_release_owner(event);
+}
+
+/* Free a ulpevent that has an owner. It includes releasing the reference
+ * to the owner, updating the rwnd in case of a DATA event and freeing the
+ * skb.
+ * See comments in sctp_stub_rfree().
+ */
+void sctp_ulpevent_free(struct sctp_ulpevent *event)
+{
+       if (sctp_ulpevent_is_notification(event))
+               sctp_ulpevent_release_owner(event);
+       else
+               sctp_ulpevent_release_data(event);
+
+       kfree_skb(sctp_event2skb(event));
+}
+
+/* Purge the skb lists holding ulpevents. */
+void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(list)) != NULL)
+               sctp_ulpevent_free(sctp_skb2event(skb));
+}
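The rework above moves all receive-window accounting out of the skb destructor (see the sctp_stub_rfree() warning) and into helpers that run while the socket lock is held: sctp_ulpevent_receive_data() charges rwnd once per fragment when an event is built, and sctp_ulpevent_release_data() credits it back when the event is freed. A short usage sketch, assuming a caller that already holds the socket lock (the function name is invented):

#include <net/sctp/sctp.h>

/* Illustration only.  Draining a queue of pending events now goes
 * through sctp_queue_purge_ulpevents() rather than skb_queue_purge(),
 * so notification events give back their association reference and
 * DATA events also return their bytes to the receive window.
 */
static void demo_flush_queue(struct sk_buff_head *queue)
{
        sctp_queue_purge_ulpevents(queue);
}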
@@ -235,9 +235,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 out_free:
        if (sctp_event2skb(event)->list)
-               skb_queue_purge(sctp_event2skb(event)->list);
+               sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
        else
-               kfree_skb(sctp_event2skb(event));
+               sctp_ulpevent_free(event);
        return 0;
 }
@@ -289,7 +289,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
        struct sk_buff *pos;
        struct sctp_ulpevent *event;
@@ -325,11 +325,10 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
                /* Remove the fragment from the reassembly queue. */
                __skb_unlink(pos, pos->list);
-
                /* Break if we have reached the last fragment. */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        };
......