Commit 9850a96f authored by Jon Grimm

Merge touki.austin.ibm.com:/home/jgrimm/bk/linux-2.5.66
into touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5.work
parents 42382f86 a33b4399
@@ -138,12 +138,10 @@ typedef enum {
*/
typedef union {
sctp_cid_t chunk;
sctp_event_timeout_t timeout;
sctp_event_other_t other;
sctp_event_primitive_t primitive;
} sctp_subtype_t;
#define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
@@ -421,9 +419,9 @@ typedef enum {
/* Reasons to retransmit. */
typedef enum {
-	SCTP_RETRANSMIT_T3_RTX,
-	SCTP_RETRANSMIT_FAST_RTX,
-	SCTP_RETRANSMIT_PMTU_DISCOVERY,
+	SCTP_RTXR_T3_RTX,
+	SCTP_RTXR_FAST_RTX,
+	SCTP_RTXR_PMTUD,
} sctp_retransmit_reason_t;
/* Reasons to lower cwnd. */
@@ -130,7 +130,7 @@ extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
/*
- * sctp_socket.c
+ * sctp/socket.c
*/
extern int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
extern int sctp_inet_listen(struct socket *sock, int backlog);
@@ -139,7 +139,7 @@ extern unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
/*
- * sctp_primitive.c
+ * sctp/primitive.c
*/
extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);
@@ -148,14 +148,14 @@ extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
/*
- * sctp_crc32c.c
+ * sctp/crc32c.c
*/
extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
extern __u32 sctp_end_cksum(__u32 cksum);
/*
- * sctp_input.c
+ * sctp/input.c
*/
extern int sctp_rcv(struct sk_buff *skb);
extern void sctp_v4_err(struct sk_buff *skb, u32 info);
@@ -170,9 +170,16 @@ extern void __sctp_unhash_endpoint(sctp_endpoint_t *);
extern sctp_association_t *__sctp_lookup_association(const union sctp_addr *,
const union sctp_addr *,
struct sctp_transport **);
+extern struct sock *sctp_err_lookup(int family, struct sk_buff *,
+				    struct sctphdr *, struct sctp_endpoint **,
+				    struct sctp_association **,
+				    struct sctp_transport **);
+extern void sctp_err_finish(struct sock *, struct sctp_endpoint *,
+			    struct sctp_association *);
+extern void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+				  struct sctp_transport *t, __u32 pmtu);
/*
- * sctp_hashdriver.c
+ * sctp/hashdriver.c
*/
extern void sctp_hash_digest(const char *secret, const int secret_len,
const char *text, const int text_len,
@@ -184,9 +191,7 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
#ifdef TEST_FRAME
#include <test_frame.h>
#else
/* spin lock wrappers. */
@@ -312,7 +317,6 @@ static inline void sctp_sysctl_register(void) { return; }
static inline void sctp_sysctl_unregister(void) { return; }
#endif
/* Size of Supported Address Parameter for 'x' address types. */
#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
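/* Worked example: struct sctp_paramhdr is two __u16 fields (type and
 * length), so SCTP_SAT_LEN(2) = 4 + 2 * 2 = 8 bytes -- a Supported
 * Address Types parameter listing two address types, say IPv4 and IPv6.
 */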
@@ -320,19 +324,15 @@ static inline void sctp_sysctl_unregister(void) { return; }
extern int sctp_v6_init(void);
extern void sctp_v6_exit(void);
static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
{
return ipv6_addr_type((struct in6_addr*) addr);
}
extern void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
int type, int code, int offset, __u32 info);
-#else /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else /* #ifdef defined(CONFIG_IPV6) */
#define sctp_ipv6_addr_type(a) 0
static inline int sctp_v6_init(void) { return 0; }
static inline void sctp_v6_exit(void) { return; }
-#endif /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* #if defined(CONFIG_IPV6) */
/* Map an association to an assoc_id. */
static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)
@@ -421,6 +421,15 @@ static inline __s32 sctp_jitter(__u32 rto)
return ret;
}
+/* Break down data chunks at this point. */
+static inline int sctp_frag_point(int pmtu)
+{
+	pmtu -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+	pmtu -= sizeof(struct sctp_sack_chunk);
+
+	return pmtu;
+}
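/* Worked example (overhead values are illustrative): with a 1500-byte
 * PMTU, a combined IP+SCTP header overhead of, say, 32 bytes, and
 * 16-byte DATA and SACK chunk headers, the fragmentation point is
 * roughly 1500 - 32 - 16 - 16 = 1436 bytes of user data per DATA
 * chunk.  Reserving the SACK header keeps room to bundle a SACK
 * alongside a full-sized DATA chunk.
 */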
/* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure
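/* A minimal sketch of the bounded walk this comment describes (the
 * variable names and the WORD_ROUND-style 4-byte rounding helper are
 * illustrative, not necessarily this tree's exact macros):
 *
 *	struct sctp_paramhdr *p = first_param;
 *	void *end = (void *)hdr + WORD_ROUND(ntohs(hdr->length));
 *
 *	while ((void *)p + sizeof(*p) <= end &&
 *	       ntohs(p->length) >= sizeof(*p) &&
 *	       (void *)p + WORD_ROUND(ntohs(p->length)) <= end) {
 *		... inspect *p ...
 *		p = (void *)p + WORD_ROUND(ntohs(p->length));
 *	}
 *
 * Only the chunk length (hdr->length) bounds the walk; a parameter
 * whose own claimed length is malformed simply ends it.
 */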
@@ -537,7 +546,7 @@ struct sctp_sock {
struct sock sk;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct ipv6_pinfo *pinet6;
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
struct inet_opt inet;
struct sctp_opt sctp;
};
@@ -550,7 +559,7 @@ struct sctp6_sock {
struct sctp_opt sctp;
struct ipv6_pinfo inet6;
};
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
#define sctp_sk(__sk) (&((struct sctp_sock *)__sk)->sctp)
@@ -590,13 +590,16 @@ struct sctp_packet {
/* This packet should advertise ECN capability to the network
* via the ECT bit.
*/
-	int ecn_capable;
+	char ecn_capable;
/* This packet contains a COOKIE-ECHO chunk. */
-	int has_cookie_echo;
+	char has_cookie_echo;
+
+	/* This packet contains a SACK chunk. */
+	char has_sack;
/* SCTP cannot fragment this packet. So let ip fragment it. */
-	int ipfragok;
+	char ipfragok;
int malloced;
};
@@ -421,8 +421,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
"%d\n", asoc, asoc->pmtu);
-	asoc->frag_point = asoc->pmtu;
-	asoc->frag_point -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+	asoc->frag_point = sctp_frag_point(asoc->pmtu);
/* The asoc->peer.port might not be meaningful yet, but
* initialize the packet structure anyway.
@@ -658,31 +657,20 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
}
/* Return an ecne chunk to get prepended to a packet.
- * Note: We are sly and return a shared, prealloced chunk.
+ * Note: We are sly and return a shared, prealloced chunk.  FIXME:
+ * No we don't, but we could/should.
*/
-sctp_chunk_t *sctp_get_ecne_prepend(sctp_association_t *asoc)
+sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
-	sctp_chunk_t *chunk;
-	int need_ecne;
-	__u32 lowest_tsn;
+	struct sctp_chunk *chunk;

-	/* Can be called from task or bh.  Both need_ecne and
-	 * last_ecne_tsn are written during bh.
-	 */
-	need_ecne = asoc->need_ecne;
-	lowest_tsn = asoc->last_ecne_tsn;
-
-	if (need_ecne) {
-		chunk = sctp_make_ecne(asoc, lowest_tsn);
-
-		/* ECNE is not mandatory to the flow.  Being unable to
-		 * alloc mem is not deadly.  We are just unable to help
-		 * out the network.  If we run out of memory, just return
-		 * NULL.
-		 */
-	} else {
+	/* Send ECNE if needed.
+	 * Not being able to allocate a chunk here is not deadly.
+	 */
+	if (asoc->need_ecne)
+		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
+	else
		chunk = NULL;
-	}
return chunk;
}
@@ -986,8 +974,7 @@ void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
if (pmtu) {
asoc->pmtu = pmtu;
-		asoc->frag_point = pmtu - (SCTP_IP_OVERHEAD +
-					   sizeof(sctp_data_chunk_t));
+		asoc->frag_point = sctp_frag_point(pmtu);
}
SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
@@ -207,21 +207,19 @@ int sctp_rcv(struct sk_buff *skb)
*/
sctp_bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
+	if (sock_owned_by_user(sk))
		sk_add_backlog(sk, (struct sk_buff *) chunk);
-	} else {
+	else
		sctp_backlog_rcv(sk, (struct sk_buff *) chunk);
-	}
/* Release the sock and any reference counts we took in the
* lookup calls.
*/
sctp_bh_unlock_sock(sk);
-	if (asoc) {
+	if (asoc)
		sctp_association_put(asoc);
-	} else {
+	else
		sctp_endpoint_put(ep);
-	}
sock_put(sk);
return ret;
@@ -268,10 +266,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
}
/* Handle icmp frag needed error. */
-static inline void sctp_icmp_frag_needed(struct sock *sk,
-					 sctp_association_t *asoc,
-					 struct sctp_transport *transport,
-					 __u32 pmtu)
+void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
+			   struct sctp_transport *t, __u32 pmtu)
{
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
printk(KERN_WARNING "%s: Reported pmtu %d too low, "
@@ -280,54 +276,38 @@ static inline void sctp_icmp_frag_needed(struct sock *sk,
pmtu = SCTP_DEFAULT_MINSEGMENT;
}
-	if (!sock_owned_by_user(sk) && transport && (transport->pmtu != pmtu)) {
-		transport->pmtu = pmtu;
+	if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) {
+		t->pmtu = pmtu;
sctp_assoc_sync_pmtu(asoc);
-		sctp_retransmit(&asoc->outqueue, transport,
-				SCTP_RETRANSMIT_PMTU_DISCOVERY);
+		sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
}
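/* Example: an ICMP "fragmentation needed" quoting an absurdly small
 * MTU (say 296) is clamped up to SCTP_DEFAULT_MINSEGMENT before the
 * association's PMTU is resynchronized, so a forged or broken ICMP
 * cannot force tiny segments; a sane report (e.g. 1400) is applied
 * as-is and triggers a PMTU-discovery retransmit.
 */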
-/*
- * This routine is called by the ICMP module when it gets some
- * sort of error condition.  If err < 0 then the socket should
- * be closed and the error returned to the user.  If err > 0
- * it's just the icmp type << 8 | icmp code.  After adjustment
- * header points to the first 8 bytes of the sctp header.  We need
- * to find the appropriate port.
- *
- * The locking strategy used here is very "optimistic".  When
- * someone else accesses the socket the ICMP is just dropped
- * and for some paths there is no check at all.
- * A more general error queue to queue errors for later handling
- * is probably better.
- *
- */
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+/* Common lookup code for icmp/icmpv6 error handler. */
+struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+			     struct sctphdr *sctphdr,
+			     struct sctp_endpoint **epp,
+			     struct sctp_association **app,
+			     struct sctp_transport **tpp)
{
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
-	int type = skb->h.icmph->type;
-	int code = skb->h.icmph->code;
-	union sctp_addr saddr, daddr;
-	struct inet_opt *inet;
+	union sctp_addr saddr;
+	union sctp_addr daddr;
+	struct sctp_af *af;
	struct sock *sk = NULL;
-	sctp_endpoint_t *ep = NULL;
-	sctp_association_t *asoc = NULL;
-	struct sctp_transport *transport;
-	int err;
+	struct sctp_endpoint *ep = NULL;
+	struct sctp_association *asoc = NULL;
+	struct sctp_transport *transport = NULL;

-	if (skb->len < ((iph->ihl << 2) + 8)) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
+	*app = NULL; *epp = NULL; *tpp = NULL;
+
+	af = sctp_get_af_specific(family);
+	if (unlikely(!af)) {
+		return NULL;
	}

-	saddr.v4.sin_family = AF_INET;
-	saddr.v4.sin_port = ntohs(sh->source);
-	memcpy(&saddr.v4.sin_addr.s_addr, &iph->saddr, sizeof(struct in_addr));
-	daddr.v4.sin_family = AF_INET;
-	daddr.v4.sin_port = ntohs(sh->dest);
-	memcpy(&daddr.v4.sin_addr.s_addr, &iph->daddr, sizeof(struct in_addr));
+	/* Initialize local addresses for lookups. */
+	af->from_skb(&saddr, skb, 1);
+	af->from_skb(&daddr, skb, 0);
/* Look for an association that matches the incoming ICMP error
* packet.
@@ -340,13 +320,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
*/
ep = __sctp_rcv_lookup_endpoint(&daddr);
if (!ep) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
+		return NULL;
}
}
if (asoc) {
-		if (ntohl(sh->vtag) != asoc->c.peer_vtag) {
+		if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
@@ -355,12 +334,90 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
sk = ep->base.sk;
sctp_bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LockDroppedIcmps);
+	*epp = ep;
+	*app = asoc;
+	*tpp = transport;
+	return sk;
+
+out:
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+	return NULL;
+}
+/* Common cleanup code for icmp/icmpv6 error handler. */
+void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
+		     struct sctp_association *asoc)
+{
+	sctp_bh_unlock_sock(sk);
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+}
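/* The intended pairing, as sctp_v4_err() below shows: a caller does
 *
 *	sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
 *	if (!sk)
 *		return;
 *	... handle the ICMP type with the sock lock held ...
 *	sctp_err_finish(sk, ep, asoc);
 *
 * sctp_err_lookup() returns with the sock locked and references held
 * on the association (or endpoint); sctp_err_finish() undoes both.
 */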
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition.  If err < 0 then the socket should
+ * be closed and the error returned to the user.  If err > 0
+ * it's just the icmp type << 8 | icmp code.  After adjustment
+ * header points to the first 8 bytes of the sctp header.  We need
+ * to find the appropriate port.
+ *
+ * The locking strategy used here is very "optimistic".  When
+ * someone else accesses the socket the ICMP is just dropped
+ * and for some paths there is no check at all.
+ * A more general error queue to queue errors for later handling
+ * is probably better.
+ *
+ */
+void sctp_v4_err(struct sk_buff *skb, __u32 info)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
+	int type = skb->h.icmph->type;
+	int code = skb->h.icmph->code;
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct inet_opt *inet;
+	char *saveip, *savesctp;
+	int err;
+
+	if (skb->len < ((iph->ihl << 2) + 8)) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.iph = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
+	/* Put back the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+	/* Warning: The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
+	switch (type) {
+	case ICMP_PARAMETERPROB:
+		err = EPROTO;
@@ -399,13 +456,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
}
out_unlock:
sctp_bh_unlock_sock(sk);
-out:
-	sock_put(sk);
-	if (asoc)
-		sctp_association_put(asoc);
-	if (ep)
-		sctp_endpoint_put(ep);
+	sctp_err_finish(sk, ep, asoc);
}
/*
@@ -782,8 +833,3 @@ sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
return asoc;
}
/* SCTP kernel reference Implementation
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
- * Copyright (c) 2002 International Business Machines, Corp.
+ * Copyright (c) 2002-2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
@@ -88,17 +88,62 @@ extern struct notifier_block sctp_inetaddr_notifier;
ntohs((addr)->s6_addr16[6]), \
ntohs((addr)->s6_addr16[7])
-/* FIXME: Comments. */
-static inline void sctp_v6_err(struct sk_buff *skb,
-			       struct inet6_skb_parm *opt,
+/* ICMP error handler. */
+void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
int type, int code, int offset, __u32 info)
{
-	/* BUG.  WRITE ME. */
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct ipv6_pinfo *np;
+	char *saveip, *savesctp;
+	int err;
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.ipv6h = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport);
+	/* Put back the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP6_INC_STATS_BH(Icmp6InErrors);
+		return;
+	}
+	/* Warning: The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
+	switch (type) {
+	case ICMPV6_PKT_TOOBIG:
+		sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
+		goto out_unlock;
+	default:
+		break;
+	}
+
+	np = inet6_sk(sk);
+	icmpv6_err_convert(type, code, &err);
+	if (!sock_owned_by_user(sk) && np->recverr) {
+		sk->err = err;
+		sk->error_report(sk);
+	} else {  /* Only an error on timeout */
+		sk->err_soft = err;
+	}
+
+out_unlock:
+	sctp_err_finish(sk, ep, asoc);
}
/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static inline int sctp_v6_xmit(struct sk_buff *skb,
-			       struct sctp_transport *transport, int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
+			int ipfragok)
{
struct sock *sk = skb->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -79,6 +79,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
packet->ecn_capable = ecn_capable;
packet->get_prepend_chunk = prepend_handler;
packet->has_cookie_echo = 0;
+	packet->has_sack = 0;
packet->ipfragok = 0;
/* We might need to call the prepend_handler right away. */
@@ -100,6 +101,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
packet->ecn_capable = 0;
packet->get_prepend_chunk = NULL;
packet->has_cookie_echo = 0;
+	packet->has_sack = 0;
packet->ipfragok = 0;
packet->malloced = 0;
sctp_packet_reset(packet);
@@ -155,6 +157,37 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
return retval;
}
+/* Try to bundle a SACK with the packet. */
+static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
+					   struct sctp_chunk *chunk)
+{
+	sctp_xmit_t retval = SCTP_XMIT_OK;
+
+	/* If sending DATA and haven't already bundled a SACK, try to
+	 * bundle one into the packet.
+	 */
+	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
+	    !pkt->has_cookie_echo) {
+		struct sctp_association *asoc;
+		asoc = pkt->transport->asoc;
+
+		if (asoc->a_rwnd > asoc->rwnd) {
+			struct sctp_chunk *sack;
+			asoc->a_rwnd = asoc->rwnd;
+			sack = sctp_make_sack(asoc);
+			if (sack) {
+				struct timer_list *timer;
+				retval = sctp_packet_append_chunk(pkt, sack);
+				asoc->peer.sack_needed = 0;
+				timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+				if (timer_pending(timer) && del_timer(timer))
+					sctp_association_put(asoc);
+			}
+		}
+	}
+	return retval;
+}
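/* Design note: asoc->a_rwnd is the receive window most recently
 * advertised to the peer.  When it exceeds the current asoc->rwnd,
 * the peer's picture of our buffer space is stale on the high side,
 * so piggybacking a SACK on outbound DATA refreshes the advertised
 * window for free and lets the pending SACK timer be cancelled.
 */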
/* Append a chunk to the offered packet reporting back any inability to do
* so.
*/
@@ -167,6 +200,10 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
size_t pmtu;
int too_big;
+	retval = sctp_packet_bundle_sack(packet, chunk);
+	if (retval != SCTP_XMIT_OK)
+		goto finish;
pmtu = ((packet->transport->asoc) ?
(packet->transport->asoc->pmtu) :
(packet->transport->pmtu));
@@ -216,9 +253,10 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
retval = sctp_packet_append_data(packet, chunk);
if (SCTP_XMIT_OK != retval)
goto finish;
-	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) {
+	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type)
		packet->has_cookie_echo = 1;
-	}
+	else if (SCTP_CID_SACK == chunk->chunk_hdr->type)
+		packet->has_sack = 1;
/* It is OK to send this chunk. */
__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
@@ -357,7 +357,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
__u8 fast_retransmit = 0;
switch(reason) {
-	case SCTP_RETRANSMIT_T3_RTX:
+	case SCTP_RTXR_T3_RTX:
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
/* Update the retran path if the T3-rtx timer has expired for
* the current retran path.
@@ -365,10 +365,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
if (transport == transport->asoc->peer.retran_path)
sctp_assoc_update_retran_path(transport->asoc);
break;
-	case SCTP_RETRANSMIT_FAST_RTX:
+	case SCTP_RTXR_FAST_RTX:
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
fast_retransmit = 1;
break;
+	case SCTP_RTXR_PMTUD:
default:
break;
}
@@ -876,7 +877,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
start_timer = 0;
queue = &q->out;
-	while (NULL != (chunk = sctp_outq_dequeue_data(q))) {
+	while ((chunk = sctp_outq_dequeue_data(q))) {
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
@@ -891,9 +892,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (ev)
sctp_ulpq_tail_event(&asoc->ulpq, ev);
-			/* Free the chunk.  This chunk is not on any
-			 * list yet, just free it.
-			 */
+			/* Free the chunk. */
sctp_free_chunk(chunk);
continue;
}
@@ -1572,7 +1571,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
if (transport) {
if (do_fast_retransmit)
-			sctp_retransmit(q, transport, SCTP_RETRANSMIT_FAST_RTX);
+			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
"ssthresh: %d, flight_size: %d, pba: %d\n",