Commit 294cb923 authored by Jon Grimm

Merge http://linux-lksctp.bkbits.net/lksctp-2.5

into touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5.work
parents 9967b51f 28131078
@@ -68,7 +68,6 @@ typedef enum {
SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */
SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
SCTP_CMD_REPORT_BIGGAP, /* Narc on a TSN (it was too high). */
SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
@@ -86,8 +85,8 @@ typedef enum {
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
SCTP_CMD_CHUNK_PD, /* Partial data delivery considerations. */
SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
SCTP_CMD_RENEGE, /* Renege data on an association. */
SCTP_CMD_LAST
} sctp_verb_t;
......
@@ -12,7 +12,7 @@
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* the SCTP reference implementation is distributed in the hope that it
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -23,9 +23,14 @@
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to one of the
* following email addresses:
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
@@ -153,15 +158,18 @@ static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
return map->dup_tsns;
}
/* Mark a duplicate TSN. Note: we limit how many we are willing to
* store and consequently report.
/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
* information.
*/
static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
{
if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
map->dup_tsns[map->num_dup_tsns++] = tsn;
map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
}
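
Because sctp_tsnmap_mark_dup() now stores each duplicate TSN with htonl(), the dup_tsns array is already in wire format and a SACK builder can append it wholesale. A minimal sketch of that consumer, assuming the lksctp helpers sctp_tsnmap_num_dups() and sctp_addto_chunk(); the surrounding SACK-construction code is illustrative, not part of this commit:

	/* Illustrative: append the duplicate TSN report to an outgoing SACK.
	 * No per-entry htonl() is needed here because the map already holds
	 * network-byte-order values.
	 */
	__u16 num_dups = sctp_tsnmap_num_dups(&asoc->peer.tsn_map);

	if (num_dups)
		sctp_addto_chunk(sack, num_dups * sizeof(__u32),
				 sctp_tsnmap_get_dups(&asoc->peer.tsn_map));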
/* Renege a TSN that was seen. */
void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
/* Is there a gap in the TSN map? */
int sctp_tsnmap_has_gap(const struct sctp_tsnmap *);
@@ -176,6 +184,3 @@ int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *,
struct sctp_tsnmap_iter *,__u16 *start, __u16 *end);
#endif /* __sctp_tsnmap_h__ */
@@ -66,6 +66,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
/* Renege previously received chunks. */
void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Perform partial delivery. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int);
......
@@ -1014,8 +1014,8 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu))) {
SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
"rwnd: %u a_rwnd: %u\n",
__FUNCTION__, asoc, asoc->rwnd, asoc->a_rwnd);
"rwnd: %u a_rwnd: %u\n", __FUNCTION__,
asoc, asoc->rwnd, asoc->a_rwnd);
sack = sctp_make_sack(asoc);
if (!sack)
return;
......
This diff is collapsed.
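
The collapsed diff is presumably the side-effect interpreter, which has to dispatch the two new verbs. A hedged sketch of what those cases might look like inside the interpreter's switch; the cmd->obj.ptr access and the case placement are assumptions, not the commit's actual code:

	case SCTP_CMD_RENEGE:
		/* Free previously delivered data to make room for the
		 * chunk carried by this command.
		 */
		sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, GFP_ATOMIC);
		break;

	case SCTP_CMD_PART_DELIVER:
		/* Push a partial message up to the ULP to relieve
		 * receive-window pressure.
		 */
		sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
					   GFP_ATOMIC);
		break;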
@@ -682,7 +682,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
@@ -2274,7 +2273,6 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
* that the value in the Verification Tag field of the
* received SCTP packet matches its own Tag.
*/
if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
@@ -2339,18 +2337,26 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
/* Even if we don't accept this chunk there is
* memory pressure.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_PD, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In cases such as loopback, this might be a rather
* large spill over.
*/
if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
/* There is absolutely no room, but this is the most
* important tsn that we are waiting on, try to
* partial deliver or renege to make room.
/* If this is the next TSN, consider reneging to make
* room. Note: Playing nice with a confused sender. A
* malicious sender can still eat up all our buffer
* space and in the future we may want to detect and
* do more drastic reneging.
*/
if ((sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
deliver = SCTP_CMD_CHUNK_PD;
if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
(sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
deliver = SCTP_CMD_RENEGE;
} else {
SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
"rwnd: %d\n", tsn, datalen,
@@ -2379,21 +2385,23 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_INC_STATS(SctpCurrEstab);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
/* If definitely accepting the DATA chunk, record its TSN, otherwise
* wait for renege processing.
*/
if (deliver != SCTP_CMD_CHUNK_PD) {
if (SCTP_CMD_CHUNK_ULP == deliver)
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
/* Note: Some chunks may get overcounted (if we drop after counting) or
* counted twice (if we renege and the chunk arrives again).
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(SctpInUnorderChunks);
else
SCTP_INC_STATS(SctpInOrderChunks);
}
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
@@ -2592,7 +2600,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_INC_STATS(SctpCurrEstab);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
......
@@ -2183,6 +2183,50 @@ static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
return 0;
}
/*
*
* 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
* 5.2.2. The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* to this call if the caller is using the UDP model.
*
* For getsockopt, it gets the default sctp_sndrcvinfo structure.
*/
static inline int sctp_getsockopt_set_default_send_param(struct sock *sk,
int len, char *optval, int *optlen)
{
struct sctp_sndrcvinfo info;
sctp_association_t *asoc;
if (len != sizeof(struct sctp_sndrcvinfo))
return -EINVAL;
if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo)))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc)
return -EINVAL;
info.sinfo_stream = asoc->defaults.stream;
info.sinfo_flags = asoc->defaults.flags;
info.sinfo_ppid = asoc->defaults.ppid;
info.sinfo_context = asoc->defaults.context;
info.sinfo_timetolive = asoc->defaults.timetolive;
if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo)))
return -EFAULT;
return 0;
}
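
For context, a hypothetical userspace caller of this new branch could look as follows. IPPROTO_SCTP as the option level and the variable names are assumptions; the in/out use of optval mirrors the copy_from_user()/copy_to_user() pair above:

	/* Hypothetical usage: fetch an association's default send
	 * parameters.  The kernel reads sinfo_assoc_id on the way in and
	 * fills in the remaining fields on the way out.
	 */
	struct sctp_sndrcvinfo info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	info.sinfo_assoc_id = assoc_id;	/* required in the UDP model */

	if (getsockopt(sd, IPPROTO_SCTP, SCTP_SET_DEFAULT_SEND_PARAM,
		       &info, &len) < 0)
		perror("getsockopt(SCTP_SET_DEFAULT_SEND_PARAM)");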
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen)
{
@@ -2260,6 +2304,11 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
optlen);
break;
case SCTP_SET_DEFAULT_SEND_PARAM:
retval = sctp_getsockopt_set_default_send_param(sk, len,
optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
......
@@ -385,3 +385,23 @@ static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
}
}
}
/* Renege that we have seen a TSN. */
void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
if (TSN_lt(tsn, map->base_tsn))
return;
if (!TSN_lt(tsn, map->base_tsn + map->len + map->len))
return;
/* Assert: TSN is in range. */
gap = tsn - map->base_tsn;
/* Pretend we never saw the TSN. */
if (gap < map->len)
map->tsn_map[gap] = 0;
else
map->overflow_map[gap - map->len] = 0;
}
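
A worked example of the indexing above, using illustrative values rather than anything from the commit:

	/* Assume map->base_tsn = 100 and map->len = 32.
	 *
	 * sctp_tsnmap_renege(map, 110);  gap = 10, clears tsn_map[10]
	 * sctp_tsnmap_renege(map, 140);  gap = 40, clears overflow_map[8]
	 * sctp_tsnmap_renege(map, 99);   below base_tsn, ignored
	 * sctp_tsnmap_renege(map, 164);  >= base_tsn + 2 * len, ignored
	 */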
@@ -655,32 +655,119 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
return event;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
__u16 freed = 0;
__u32 tsn;
struct sk_buff *skb;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->sndrcvinfo.sinfo_tsn;
sctp_ulpevent_free(event);
sctp_tsnmap_renege(tsnmap, tsn);
if (freed >= needed)
return freed;
}
return freed;
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
__u16 freed = 0;
__u32 tsn;
struct sk_buff *skb;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
/* Walk backwards through the list, reneging the newest TSNs. */
while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->sndrcvinfo.sinfo_tsn;
sctp_ulpevent_free(event);
sctp_tsnmap_renege(tsnmap, tsn);
if (freed >= needed)
return freed;
}
return freed;
}
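
The two helpers above differ only in which queue they drain, so they could share one worker. A hedged sketch of that refactor; later lksctp trees did consolidate this pattern into a sctp_ulpq_renege_list() helper, but the code below is illustrative rather than the commit's:

	/* Drain up to 'needed' bytes from 'list', newest TSNs first,
	 * un-marking each reneged TSN in the peer's map.
	 */
	static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
					   struct sk_buff_head *list,
					   __u16 needed)
	{
		__u16 freed = 0;
		__u32 tsn;
		struct sk_buff *skb;
		struct sctp_ulpevent *event;
		struct sctp_tsnmap *tsnmap;

		tsnmap = &ulpq->asoc->peer.tsn_map;

		while ((skb = __skb_dequeue_tail(list)) != NULL) {
			freed += skb_headlen(skb);
			event = sctp_skb2event(skb);
			tsn = event->sndrcvinfo.sinfo_tsn;

			sctp_ulpevent_free(event);
			sctp_tsnmap_renege(tsnmap, tsn);
			if (freed >= needed)
				break;
		}

		return freed;
	}

With this in place, sctp_ulpq_renege_order() and sctp_ulpq_renege_frags() would reduce to one-line wrappers over &ulpq->lobby and &ulpq->reasm respectively.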
/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, int priority)
{
struct sctp_ulpevent *event;
struct sctp_association *asoc;
asoc = ulpq->asoc;
/* Are we already in partial delivery mode? */
if (!sctp_sk(ulpq->asoc->base.sk)->pd_mode) {
if (!sctp_sk(asoc->base.sk)->pd_mode) {
/* Is partial delivery possible? */
event = sctp_ulpq_retrieve_first(ulpq);
/* Send event to the ULP. */
if (event) {
sctp_ulpq_tail_event(ulpq, event);
sctp_sk(ulpq->asoc->base.sk)->pd_mode = 1;
sctp_sk(asoc->base.sk)->pd_mode = 1;
ulpq->pd_mode = 1;
return;
}
}
}
/* Assert: Either already in partial delivery mode or partial
* delivery wasn't possible, so now the only recourse is
* to renege. FIXME: Renege support starts here.
*/
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
int priority)
{
struct sctp_association *asoc;
__u16 needed, freed;
asoc = ulpq->asoc;
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(sctp_data_chunk_t);
} else
needed = SCTP_DEFAULT_MAXWINDOW;
freed = 0;
if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
}
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
__u32 tsn;
tsn = ntohl(chunk->subh.data_hdr->tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
sctp_ulpq_tail_data(ulpq, chunk, priority);
sctp_ulpq_partial_delivery(ulpq, chunk, priority);
}
return;
}
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
......