Commit 78398488 authored by Sridhar Samudrala

Merge us.ibm.com:/home/sridhar/BK/linux-2.6.0-test1
into us.ibm.com:/home/sridhar/BK/lksctp-2.5.75

parents 8916e8f1 c15987c4
......@@ -231,7 +231,8 @@ struct sctp_chunk *sctp_make_data_empty(struct sctp_association *,
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *,
const __u32);
struct sctp_chunk *sctp_make_sack(const struct sctp_association *);
-struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
const struct sctp_chunk *);
struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
......
......@@ -40,6 +40,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
+* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -72,9 +73,10 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
}
struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int gfp);
-struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
+void sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
void sctp_ulpevent_free(struct sctp_ulpevent *);
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
+void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
......
......@@ -877,7 +877,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
/* Delete the association from the old endpoint's list of
* associations.
*/
-list_del(&assoc->asocs);
+list_del_init(&assoc->asocs);
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
......
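Several hunks in this merge convert list_del() to list_del_init(). The distinction matters: list_del() poisons the entry's link pointers, so a later list_empty() test or a second deletion on the same entry oopses, while list_del_init() leaves the entry behind as a valid empty list. Abridged from include/linux/list.h of this era:

	static inline void list_del(struct list_head *entry)
	{
		__list_del(entry->prev, entry->next);
		entry->next = LIST_POISON1;	/* traversing a deleted entry faults */
		entry->prev = LIST_POISON2;
	}

	static inline void list_del_init(struct list_head *entry)
	{
		__list_del(entry->prev, entry->next);
		INIT_LIST_HEAD(entry);		/* entry remains a valid (empty) list */
	}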
......@@ -85,7 +85,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
/* Release all references. */
list_for_each_safe(pos, temp, &msg->chunks) {
-list_del(pos);
+list_del_init(pos);
chunk = list_entry(pos, struct sctp_chunk, frag_list);
/* Check whether we _really_ need to notify. */
if (notify < 0) {
......@@ -294,7 +294,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
errout:
list_for_each_safe(pos, temp, &msg->chunks) {
-list_del(pos);
+list_del_init(pos);
chunk = list_entry(pos, struct sctp_chunk, frag_list);
sctp_chunk_free(chunk);
}
......
......@@ -97,6 +97,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
packet->source_port = sport;
packet->destination_port = dport;
skb_queue_head_init(&packet->chunks);
+packet->size = SCTP_IP_OVERHEAD;
packet->vtag = 0;
packet->ecn_capable = 0;
packet->get_prepend_chunk = NULL;
......@@ -219,9 +220,8 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
/* Both control chunks and data chunks with TSNs are
* non-fragmentable.
*/
-if (packet_empty) {
-/* We no longer do refragmentation at all.
+if (packet_empty || !sctp_chunk_is_data(chunk)) {
+/* We no longer do re-fragmentation.
* Just fragment at the IP layer, if we
* actually hit this condition
*/
......@@ -229,7 +229,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
packet->ipfragok = 1;
goto append;
-} else { /* !packet_empty */
+} else {
retval = SCTP_XMIT_PMTU_FULL;
goto finish;
}
......@@ -283,20 +283,18 @@ int sctp_packet_transmit(struct sctp_packet *packet)
__u8 has_data = 0;
struct dst_entry *dst;
-/* Do NOT generate a chunkless packet... */
-if (skb_queue_empty(&packet->chunks))
+/* Do NOT generate a chunkless packet. */
+chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
+if (unlikely(!chunk))
return err;
/* Set up convenience variables... */
-chunk = (struct sctp_chunk *) (packet->chunks.next);
sk = chunk->skb->sk;
/* Allocate the new skb. */
nskb = dev_alloc_skb(packet->size);
-if (!nskb) {
-err = -ENOMEM;
-goto out;
-}
+if (!nskb)
+goto nomem;
/* Make sure the outbound skb has enough header room reserved. */
skb_reserve(nskb, SCTP_IP_OVERHEAD);
......@@ -468,9 +466,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
if (!nskb->dst)
goto no_route;
SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb length %d\n",
SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
nskb->len);
(*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);
out:
+packet->size = SCTP_IP_OVERHEAD;
return err;
......@@ -486,7 +486,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* required.
*/
/* err = -EHOSTUNREACH; */
+err:
+/* Control chunks are unreliable so just drop them. DATA chunks
+ * will get resent or dropped later.
+ */
+while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
+if (!sctp_chunk_is_data(chunk))
+sctp_chunk_free(chunk);
+}
+goto out;
+nomem:
+err = -ENOMEM;
+printk("%s alloc_skb failed.\n", __FUNCTION__);
+goto err;
}
/********************************************************************
......
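The new err: path distinguishes control chunks, which SCTP delivers unreliably and can simply drop, from DATA chunks, which stay owned by the outqueue so the retransmission machinery can resend or expire them later. The predicate it relies on is roughly the following (a sketch; it assumes chunk->chunk_hdr->type carries the chunk type, as elsewhere in lksctp):

	/* Sketch of the helper the err: path depends on; assumes the
	 * chunk header layout used elsewhere in this tree.
	 */
	static inline int sctp_chunk_is_data(const struct sctp_chunk *chunk)
	{
		return (chunk->chunk_hdr->type == SCTP_CID_DATA);
	}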
......@@ -258,7 +258,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
/* Throw away chunks that have been gap ACKed. */
list_for_each_safe(lchunk, temp, &q->sacked) {
-list_del(lchunk);
+list_del_init(lchunk);
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
sctp_datamsg_fail(chunk, q->error);
......@@ -267,7 +267,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
/* Throw away any chunks in the retransmit queue. */
list_for_each_safe(lchunk, temp, &q->retransmit) {
-list_del(lchunk);
+list_del_init(lchunk);
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
sctp_datamsg_fail(chunk, q->error);
......@@ -445,7 +445,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
/* Move the chunk to the retransmit queue. The chunks
* on the retransmit queue is always kept in order.
*/
-list_del(lchunk);
+list_del_init(lchunk);
sctp_retransmit_insert(lchunk, q);
}
}
......@@ -1007,7 +1007,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
struct sctp_association *asoc = q->asoc;
struct sctp_transport *transport;
struct sctp_chunk *tchunk;
-struct list_head *lchunk, *transport_list, *pos;
+struct list_head *lchunk, *transport_list, *pos, *temp;
sctp_sack_variable_t *frags = sack->variable;
__u32 sack_ctsn, ctsn, tsn;
__u32 highest_tsn, highest_new_tsn;
......@@ -1115,14 +1115,12 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
"%p is 0x%x.\n", __FUNCTION__, asoc, ctsn);
/* Throw away stuff rotting on the sack queue. */
-list_for_each(lchunk, &q->sacked) {
+list_for_each_safe(lchunk, temp, &q->sacked) {
tchunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
tsn = ntohl(tchunk->subh.data_hdr->tsn);
-if (TSN_lte(tsn, ctsn)) {
-lchunk = lchunk->prev;
+if (TSN_lte(tsn, ctsn))
sctp_chunk_free(tchunk);
-}
}
/* ii) Set rwnd equal to the newly received a_rwnd minus the
......
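The sctp_outq_sack() change replaces the fragile lchunk = lchunk->prev back-step, which compensated for freeing the entry the iterator was standing on, with list_for_each_safe(), which caches the successor before the loop body runs. Simplified forms of the two macros (prefetch hints omitted):

	#define list_for_each(pos, head) \
		for (pos = (head)->next; pos != (head); pos = pos->next)

	/* 'n' holds the successor, so the body may free or unlink 'pos'. */
	#define list_for_each_safe(pos, n, head) \
		for (pos = (head)->next, n = pos->next; pos != (head); \
		     pos = n, n = pos->next)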
......@@ -667,7 +667,8 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
}
/* Make a SHUTDOWN chunk. */
-struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc)
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+const struct sctp_chunk *chunk)
{
struct sctp_chunk *retval;
sctp_shutdownhdr_t shut;
......@@ -683,6 +684,9 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc)
retval->subh.shutdown_hdr =
sctp_addto_chunk(retval, sizeof(shut), &shut);
+if (chunk)
+retval->transport = chunk->transport;
nodata:
return retval;
}
......@@ -1089,7 +1093,7 @@ void sctp_chunk_free(struct sctp_chunk *chunk)
{
/* Make sure that we are not on any list. */
skb_unlink((struct sk_buff *) chunk);
-list_del(&chunk->transmitted_list);
+list_del_init(&chunk->transmitted_list);
/* Release our reference on the message tracker. */
if (chunk->msg)
......@@ -1850,7 +1854,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
-list_del(pos);
+list_del_init(pos);
sctp_transport_free(transport);
}
nomem:
......
......@@ -962,7 +962,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
asoc->overall_error_count = 0;
/* Generate a SHUTDOWN chunk. */
-new_obj = sctp_make_shutdown(asoc);
+new_obj = sctp_make_shutdown(asoc, chunk);
if (!new_obj)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
......
......@@ -3862,7 +3862,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
* in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer.
*/
-reply = sctp_make_shutdown(asoc);
+reply = sctp_make_shutdown(asoc, NULL);
if (!reply)
goto nomem;
......@@ -4179,7 +4179,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
switch (asoc->state) {
case SCTP_STATE_SHUTDOWN_SENT:
-reply = sctp_make_shutdown(asoc);
+reply = sctp_make_shutdown(asoc, NULL);
break;
case SCTP_STATE_SHUTDOWN_ACK_SENT:
......
......@@ -768,8 +768,8 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
}
/* Clean up any skbs sitting on the receive queue. */
-skb_queue_purge(&sk->sk_receive_queue);
-skb_queue_purge(&sctp_sk(sk)->pd_lobby);
+sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
/* On a TCP-style socket, block for at most linger_time if set. */
if (sctp_style(sk, TCP) && timeout)
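skb_queue_purge() frees only the skbs; queued ulpevents also hold association references and rwnd accounting that must be unwound through sctp_ulpevent_free(). The body of the new helper is not shown in this excerpt, but is presumably along these lines:

	/* Hypothetical sketch; the helper's actual body is not shown in
	 * this excerpt. Dequeue each skb and free it through the ulpevent
	 * layer so rwnd and association references are dropped too.
	 */
	void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(list)) != NULL)
			sctp_ulpevent_free(sctp_skb2event(skb));
	}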
......@@ -1342,8 +1342,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
-* rwnd is updated when the skb's destructor is called via
-* sctp_ulpevent_free().
+* rwnd is updated when the event is freed.
*/
sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
......@@ -1354,7 +1353,18 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
msg->msg_flags &= ~MSG_EOR;
out_free:
-sctp_ulpevent_free(event); /* Free the skb. */
+if (flags & MSG_PEEK) {
+/* Release the skb reference acquired after peeking the skb in
+ * sctp_skb_recv_datagram().
+ */
+kfree_skb(skb);
+} else {
+/* Free the event which includes releasing the reference to
+ * the owner of the skb, freeing the skb and updating the
+ * rwnd.
+ */
+sctp_ulpevent_free(event);
+}
out:
sctp_release_sock(sk);
return err;
......
......@@ -117,6 +117,7 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
INIT_LIST_HEAD(&peer->transmitted);
INIT_LIST_HEAD(&peer->send_ready);
INIT_LIST_HEAD(&peer->transports);
+sctp_packet_init(&peer->packet, peer, 0, 0);
/* Set up the retransmission timer. */
init_timer(&peer->T3_rtx_timer);
......
......@@ -235,9 +235,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
out_free:
if (sctp_event2skb(event)->list)
-skb_queue_purge(sctp_event2skb(event)->list);
+sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
else
-kfree_skb(sctp_event2skb(event));
+sctp_ulpevent_free(event);
return 0;
}
......@@ -289,7 +289,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
* payload was fragmented on the way and ip had to reassemble them.
* We add the rest of skb's to the first skb's fraglist.
*/
-static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
struct sctp_ulpevent *event;
......@@ -325,11 +325,10 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
/* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, pos->list);
+/* Break if we have reached the last fragment. */
+if (pos == l_frag)
+break;
pos->next = pnext;
pos = pnext;
};
......