Commit b3b87077 authored by David S. Miller

Merge branch 'sctp-stream-interleave'

Xin Long says:

====================
sctp: Implement Stream Interleave: Interaction with Other SCTP Extensions

Stream Interleave is implemented in two parts:

   1. The I-DATA Chunk Supporting User Message Interleaving
   2. Interaction with Other SCTP Extensions

The overview for part 2, from section 2.3 of RFC 8260:

   The usage of the I-DATA chunk might interfere with other SCTP
   extensions.  Future SCTP extensions MUST describe if and how they
   interfere with the usage of I-DATA chunks.  For the SCTP extensions
   already defined when this document was published, the details are
   given in the following subsections.

As the second part of the Stream Interleave implementation, this patchset
adds support for the SCTP Partial Reliability extension via the
I-FORWARD-TSN chunk, then adjusts the stream scheduler and stream reconfig
code so that they work properly with I-DATA chunks.

The last patch enables all of the stream interleave code, adding a sysctl
that lets users switch the feature on (see the usage sketch below the
commit tags).

v1 -> v2:
  - removed the intl_enable check from sctp_chunk_event_lookup, as Marcelo
    suggested.
  - fixed a typo in changelog.
====================
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 02477809 463118c3
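For reference: with the whole series applied, user message interleaving stays off until the new sysctl from the last patch is turned on. A minimal userspace sketch, assuming the knob lands at /proc/sys/net/sctp/intl_enable per the "intl_enable" ctl_table entry in the sysctl hunk at the end of this diff:

/* Sketch: enable SCTP stream interleaving system-wide (run as root).
 * The path is inferred from the ctl_table entry added in this series.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/sctp/intl_enable", "w");

	if (!f) {
		perror("intl_enable");
		return 1;
	}
	fputs("1\n", f);	/* 0 = disabled (default), 1 = enabled */
	fclose(f);
	return 0;
}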
......@@ -110,6 +110,7 @@ enum sctp_cid {
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
}; /* enum */
......@@ -616,6 +617,22 @@ struct sctp_fwdtsn_chunk {
struct sctp_fwdtsn_hdr fwdtsn_hdr;
};
struct sctp_ifwdtsn_skip {
__be16 stream;
__u8 reserved;
__u8 flags;
__be32 mid;
};
struct sctp_ifwdtsn_hdr {
__be32 new_cum_tsn;
struct sctp_ifwdtsn_skip skip[0];
};
struct sctp_ifwdtsn_chunk {
struct sctp_chunkhdr chunk_hdr;
struct sctp_ifwdtsn_hdr fwdtsn_hdr;
};
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
......
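For orientation: the structures above give the I-FORWARD-TSN chunk the same 8-byte fixed part as FORWARD-TSN (chunk header plus new_cum_tsn), followed by 8-byte skip entries (stream, reserved, flags, MID) in place of FORWARD-TSN's 4-byte (stream, SSN) pairs. A hedged sketch of the resulting length arithmetic, using only the types added above:

/* Sketch: wire length of an I-FORWARD-TSN carrying n skip entries. */
static inline size_t ifwdtsn_chunk_len(size_t n)
{
	/* 4-byte chunk header + 4-byte new_cum_tsn + n * 8-byte skips */
	return sizeof(struct sctp_ifwdtsn_chunk) +
	       n * sizeof(struct sctp_ifwdtsn_skip);
}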
......@@ -199,6 +199,9 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
__u8 flags, int paylen, gfp_t gfp);
struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_ifwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int len, __u8 flags, gfp_t gfp);
......
......@@ -33,6 +33,7 @@
struct sctp_stream_interleave {
__u16 data_chunk_len;
__u16 ftsn_chunk_len;
/* (I-)DATA process */
struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
......@@ -47,6 +48,12 @@ struct sctp_stream_interleave {
struct sctp_chunk *chunk, gfp_t gfp);
void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
/* (I-)FORWARD-TSN process */
void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
bool (*validate_ftsn)(struct sctp_chunk *chunk);
void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
void (*handle_ftsn)(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk);
};
void sctp_stream_interleave_init(struct sctp_stream *stream);
......
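These four callbacks are what keep the core chunk-format-agnostic: call sites go through asoc->stream.si instead of hard-coding FORWARD-TSN handling, and sctp_stream_interleave_init() binds the table to either the FORWARD-TSN or the I-FORWARD-TSN implementations (see the stream_interleave.c hunk further down). A sketch of the calling pattern, with q, chunk and the TSN values as hypothetical locals:

/* Sketch: dispatching through the per-association ops table. */
static void example_ftsn_paths(struct sctp_outq *q, struct sctp_chunk *chunk,
			       __u32 ctsn, __u32 ftsn)
{
	struct sctp_association *asoc = q->asoc;

	/* sender side: queue a (I-)FORWARD-TSN when the ack point moves */
	asoc->stream.si->generate_ftsn(q, ctsn);

	/* receiver side: check the skip list, then move the cum-ack point */
	if (asoc->stream.si->validate_ftsn(chunk))
		asoc->stream.si->report_ftsn(&asoc->ulpq, ftsn);
}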
......@@ -599,6 +599,7 @@ struct sctp_chunk {
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_authhdr *auth_hdr;
struct sctp_idatahdr *idata_hdr;
struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
} subh;
__u8 *chunk_end;
......@@ -1099,6 +1100,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
/* Uncork and flush an outqueue. */
static inline void sctp_outq_cork(struct sctp_outq *q)
{
......@@ -1441,6 +1443,16 @@ static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
}
static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
{
return stream->si->ftsn_chunk_len;
}
static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
{
return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
}
/* SCTP_GET_ASSOC_STATS counters */
struct sctp_priv_assoc_stats {
/* Maximum observed rto in the association during subsequent
......
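Note that both FORWARD-TSN variants share the same 8-byte fixed chunk, so sctp_ftsnchk_len() and sctp_ftsnhdr_len() evaluate to the same values in either mode; the helpers exist so the state functions stay mode-neutral, just as sctp_datahdr_len() does for the 16-byte DATA vs 20-byte I-DATA chunks. A sketch pinning those sizes down (illustrative, derived from the structure definitions in this series):

/* Sketch: compile-time check of the per-mode chunk sizes. */
#include <linux/build_bug.h>

static inline void sctp_len_sanity(void)
{
	BUILD_BUG_ON(sizeof(struct sctp_data_chunk)    != 16);
	BUILD_BUG_ON(sizeof(struct sctp_idata_chunk)   != 20);
	BUILD_BUG_ON(sizeof(struct sctp_fwdtsn_chunk)  !=  8);
	BUILD_BUG_ON(sizeof(struct sctp_ifwdtsn_chunk) !=  8);
}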
......@@ -67,8 +67,6 @@ static void sctp_mark_missing(struct sctp_outq *q,
__u32 highest_new_tsn,
int count_of_newacks);
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
......@@ -591,7 +589,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
* following the procedures outlined in C1 - C5.
*/
if (reason == SCTP_RTXR_T3_RTX)
-sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
/* Flush the queues only on timeout, since fast_rtx is only
* triggered during sack processing and the queue
......@@ -942,6 +940,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
case SCTP_CID_ECN_ECNE:
case SCTP_CID_ASCONF:
case SCTP_CID_FWD_TSN:
case SCTP_CID_I_FWD_TSN:
case SCTP_CID_RECONF:
status = sctp_packet_transmit_chunk(packet, chunk,
one_packet, gfp);
......@@ -956,7 +955,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
* sender MUST assure that at least one T3-rtx
* timer is running.
*/
-if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
+chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
sctp_transport_reset_t3_rtx(transport);
transport->last_time_sent = jiffies;
}
......@@ -1372,7 +1372,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
asoc->peer.rwnd = sack_a_rwnd;
-sctp_generate_fwdtsn(q, sack_ctsn);
+asoc->stream.si->generate_ftsn(q, sack_ctsn);
pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
......@@ -1795,7 +1795,7 @@ static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
struct sctp_association *asoc = q->asoc;
struct sctp_chunk *ftsn_chunk = NULL;
......
......@@ -3536,6 +3536,30 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
return retval;
}
struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_ifwdtsn_skip *skiplist)
{
struct sctp_chunk *retval = NULL;
struct sctp_ifwdtsn_hdr ftsn_hdr;
size_t hint;
hint = (nstreams + 1) * sizeof(__u32);
retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
GFP_ATOMIC);
if (!retval)
return NULL;
ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
retval->subh.ifwdtsn_hdr =
sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
return retval;
}
/* RE-CONFIG 3.1 (RE-CONFIG chunk)
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
......
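A hedged usage sketch for the new constructor, mirroring how sctp_generate_iftsn() (in the stream_interleave.c hunk below) builds and queues the chunk; the stream, MID and TSN values are illustrative:

/* Sketch: emit an I-FORWARD-TSN with a single skip entry. */
static void example_make_ifwdtsn(struct sctp_association *asoc,
				 struct sctp_outq *q)
{
	struct sctp_ifwdtsn_skip skip = {
		.stream	= htons(2),	/* skipped stream id */
		.flags	= 0,		/* SCTP_FTSN_U_BIT for unordered */
		.mid	= htonl(7),	/* highest skipped MID */
	};
	struct sctp_chunk *chunk;

	/* advance the peer's cumulative TSN ack point to 0x1000 */
	chunk = sctp_make_ifwdtsn(asoc, 0x1000, 1, &skip);
	if (chunk)
		list_add_tail(&chunk->list, &q->control_chunk_list);
}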
......@@ -1007,18 +1007,6 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
}
}
-/* Process variable FWDTSN chunk information. */
-static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
-struct sctp_chunk *chunk)
-{
-struct sctp_fwdtsn_skip *skip;
-/* Walk through all the skipped SSNs */
-sctp_walk_fwdtsn(skip, chunk) {
-sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
-}
-}
/* Helper function to remove the association non-primary peer
* transports.
*/
......@@ -1368,18 +1356,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_REPORT_FWDTSN:
-/* Move the Cumulative TSN Ack ahead. */
-sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
-/* purge the fragmentation queue */
-sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
-/* Abort any in progress partial delivery. */
-sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
break;
case SCTP_CMD_PROCESS_FWDTSN:
-sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
+asoc->stream.si->handle_ftsn(&asoc->ulpq,
+cmd->obj.chunk);
break;
case SCTP_CMD_GEN_SACK:
......
......@@ -3957,7 +3957,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
{
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_chunk *chunk = arg;
-struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
......@@ -3971,7 +3970,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the FORWARD_TSN chunk has valid length. */
-if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
......@@ -3990,14 +3989,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
-sctp_walk_fwdtsn(skip, chunk) {
-if (ntohs(skip->stream) >= asoc->stream.incnt)
-goto discard_noforce;
-}
+if (!asoc->stream.si->validate_ftsn(chunk))
+goto discard_noforce;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
-if (len > sizeof(struct sctp_fwdtsn_hdr))
+if (len > sctp_ftsnhdr_len(&asoc->stream))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
......@@ -4028,7 +4024,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
{
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_chunk *chunk = arg;
-struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
......@@ -4042,7 +4037,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the FORWARD_TSN chunk has a valid length. */
-if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
......@@ -4061,14 +4056,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
/* Silently discard the chunk if stream-id is not valid */
-sctp_walk_fwdtsn(skip, chunk) {
-if (ntohs(skip->stream) >= asoc->stream.incnt)
-goto gen_shutdown;
-}
+if (!asoc->stream.si->validate_ftsn(chunk))
+goto gen_shutdown;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
-if (len > sizeof(struct sctp_fwdtsn_hdr))
+if (len > sctp_ftsnhdr_len(&asoc->stream))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
......
......@@ -985,14 +985,14 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
if (state > SCTP_STATE_MAX)
return &bug;
-if (net->sctp.intl_enable && cid == SCTP_CID_I_DATA)
+if (cid == SCTP_CID_I_DATA)
cid = SCTP_CID_DATA;
if (cid <= SCTP_CID_BASE_MAX)
return &chunk_event_table[cid][state];
if (net->sctp.prsctp_enable) {
-if (cid == SCTP_CID_FWD_TSN)
+if (cid == SCTP_CID_FWD_TSN || cid == SCTP_CID_I_FWD_TSN)
return &prsctp_chunk_event_table[0][state];
}
......
......@@ -216,11 +216,13 @@ void sctp_stream_clear(struct sctp_stream *stream)
{
int i;
-for (i = 0; i < stream->outcnt; i++)
-stream->out[i].ssn = 0;
+for (i = 0; i < stream->outcnt; i++) {
+stream->out[i].mid = 0;
+stream->out[i].mid_uo = 0;
+}
for (i = 0; i < stream->incnt; i++)
-stream->in[i].ssn = 0;
+stream->in[i].mid = 0;
}
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
......@@ -607,10 +609,10 @@ struct sctp_chunk *sctp_process_strreset_outreq(
}
for (i = 0; i < nums; i++)
-stream->in[ntohs(str_p[i])].ssn = 0;
+stream->in[ntohs(str_p[i])].mid = 0;
} else {
for (i = 0; i < stream->incnt; i++)
-stream->in[i].ssn = 0;
+stream->in[i].mid = 0;
}
result = SCTP_STRRESET_PERFORMED;
......@@ -754,8 +756,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
* performed.
*/
max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
-sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
-sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
* TSN that the peer should use to send the next DATA chunk. The
......@@ -784,10 +785,12 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
/* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
* incoming and outgoing streams.
*/
-for (i = 0; i < stream->outcnt; i++)
-stream->out[i].ssn = 0;
+for (i = 0; i < stream->outcnt; i++) {
+stream->out[i].mid = 0;
+stream->out[i].mid_uo = 0;
+}
for (i = 0; i < stream->incnt; i++)
-stream->in[i].ssn = 0;
+stream->in[i].mid = 0;
result = SCTP_STRRESET_PERFORMED;
......@@ -977,11 +980,15 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (result == SCTP_STRRESET_PERFORMED) {
if (nums) {
-for (i = 0; i < nums; i++)
-stream->out[ntohs(str_p[i])].ssn = 0;
+for (i = 0; i < nums; i++) {
+stream->out[ntohs(str_p[i])].mid = 0;
+stream->out[ntohs(str_p[i])].mid_uo = 0;
+}
} else {
-for (i = 0; i < stream->outcnt; i++)
-stream->out[i].ssn = 0;
+for (i = 0; i < stream->outcnt; i++) {
+stream->out[i].mid = 0;
+stream->out[i].mid_uo = 0;
+}
}
flags = SCTP_STREAM_RESET_OUTGOING_SSN;
......@@ -1024,8 +1031,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
&asoc->peer.tsn_map);
LIST_HEAD(temp);
-sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
-sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
sctp_tsnmap_init(&asoc->peer.tsn_map,
SCTP_TSN_MAP_INITIAL,
......@@ -1043,10 +1049,12 @@ struct sctp_chunk *sctp_process_strreset_resp(
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
-for (i = 0; i < stream->outcnt; i++)
-stream->out[i].ssn = 0;
+for (i = 0; i < stream->outcnt; i++) {
+stream->out[i].mid = 0;
+stream->out[i].mid_uo = 0;
+}
for (i = 0; i < stream->incnt; i++)
-stream->in[i].ssn = 0;
+stream->in[i].mid = 0;
}
for (i = 0; i < stream->outcnt; i++)
......
......@@ -1082,8 +1082,213 @@ static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
sctp_ulpq_flush(ulpq);
}
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
int nskips, __be16 stream, __u8 flags)
{
int i;
for (i = 0; i < nskips; i++)
if (skiplist[i].stream == stream &&
skiplist[i].flags == flags)
return i;
return i;
}
#define SCTP_FTSN_U_BIT 0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
struct sctp_association *asoc = q->asoc;
struct sctp_chunk *ftsn_chunk = NULL;
struct list_head *lchunk, *temp;
int nskips = 0, skip_pos;
struct sctp_chunk *chunk;
__u32 tsn;
if (!asoc->peer.prsctp_capable)
return;
if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
asoc->adv_peer_ack_point = ctsn;
list_for_each_safe(lchunk, temp, &q->abandoned) {
chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
tsn = ntohl(chunk->subh.data_hdr->tsn);
if (TSN_lte(tsn, ctsn)) {
list_del_init(lchunk);
sctp_chunk_free(chunk);
} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
__be16 sid = chunk->subh.idata_hdr->stream;
__be32 mid = chunk->subh.idata_hdr->mid;
__u8 flags = 0;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
flags |= SCTP_FTSN_U_BIT;
asoc->adv_peer_ack_point = tsn;
skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
sid, flags);
ftsn_skip_arr[skip_pos].stream = sid;
ftsn_skip_arr[skip_pos].reserved = 0;
ftsn_skip_arr[skip_pos].flags = flags;
ftsn_skip_arr[skip_pos].mid = mid;
if (skip_pos == nskips)
nskips++;
if (nskips == 10)
break;
} else {
break;
}
}
if (asoc->adv_peer_ack_point > ctsn)
ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
nskips, &ftsn_skip_arr[0]);
if (ftsn_chunk) {
list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
}
}
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
for (pos = chunk->subh.ifwdtsn_hdr->skip; \
(void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
#define sctp_walk_ifwdtsn(pos, ch) \
_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
sizeof(struct sctp_ifwdtsn_chunk))
static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
struct sctp_fwdtsn_skip *skip;
__u16 incnt;
if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
return false;
incnt = chunk->asoc->stream.incnt;
sctp_walk_fwdtsn(skip, chunk)
if (ntohs(skip->stream) >= incnt)
return false;
return true;
}
static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
struct sctp_ifwdtsn_skip *skip;
__u16 incnt;
if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
return false;
incnt = chunk->asoc->stream.incnt;
sctp_walk_ifwdtsn(skip, chunk)
if (ntohs(skip->stream) >= incnt)
return false;
return true;
}
static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
/* purge the fragmentation queue */
sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
/* Abort any in progress partial delivery. */
sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}
static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
struct sk_buff *pos, *tmp;
skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
struct sctp_ulpevent *event = sctp_skb2event(pos);
__u32 tsn = event->tsn;
if (TSN_lte(tsn, ftsn)) {
__skb_unlink(pos, &ulpq->reasm);
sctp_ulpevent_free(event);
}
}
skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
struct sctp_ulpevent *event = sctp_skb2event(pos);
__u32 tsn = event->tsn;
if (TSN_lte(tsn, ftsn)) {
__skb_unlink(pos, &ulpq->reasm_uo);
sctp_ulpevent_free(event);
}
}
}
static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
/* purge the fragmentation queue */
sctp_intl_reasm_flushtsn(ulpq, ftsn);
/* abort only when it's for all data */
if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}
static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
struct sctp_fwdtsn_skip *skip;
/* Walk through all the skipped SSNs */
sctp_walk_fwdtsn(skip, chunk)
sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
__u8 flags)
{
struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
struct sctp_stream *stream = &ulpq->asoc->stream;
if (flags & SCTP_FTSN_U_BIT) {
if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
sin->pd_mode_uo = 0;
sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
GFP_ATOMIC);
}
return;
}
if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
return;
if (sin->pd_mode) {
sin->pd_mode = 0;
sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
}
sctp_mid_skip(stream, in, sid, mid);
sctp_intl_reap_ordered(ulpq, sid);
}
static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
struct sctp_ifwdtsn_skip *skip;
/* Walk through all the skipped MIDs and abort stream pd if possible */
sctp_walk_ifwdtsn(skip, chunk)
sctp_intl_skip(ulpq, ntohs(skip->stream),
ntohl(skip->mid), skip->flags);
}
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
.data_chunk_len = sizeof(struct sctp_data_chunk),
.ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
/* DATA process functions */
.make_datafrag = sctp_make_datafrag_empty,
.assign_number = sctp_chunk_assign_ssn,
......@@ -1093,10 +1298,16 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
.renege_events = sctp_ulpq_renege,
.start_pd = sctp_ulpq_partial_delivery,
.abort_pd = sctp_ulpq_abort_pd,
/* FORWARD-TSN process functions */
.generate_ftsn = sctp_generate_fwdtsn,
.validate_ftsn = sctp_validate_fwdtsn,
.report_ftsn = sctp_report_fwdtsn,
.handle_ftsn = sctp_handle_fwdtsn,
};
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.data_chunk_len = sizeof(struct sctp_idata_chunk),
.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
/* I-DATA process functions */
.make_datafrag = sctp_make_idatafrag_empty,
.assign_number = sctp_chunk_assign_mid,
......@@ -1106,6 +1317,11 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.renege_events = sctp_renege_events,
.start_pd = sctp_intl_start_pd,
.abort_pd = sctp_intl_abort_pd,
/* I-FORWARD-TSN process functions */
.generate_ftsn = sctp_generate_iftsn,
.validate_ftsn = sctp_validate_iftsn,
.report_ftsn = sctp_report_iftsn,
.handle_ftsn = sctp_handle_iftsn,
};
void sctp_stream_interleave_init(struct sctp_stream *stream)
......
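One subtlety in sctp_generate_iftsn() above: sctp_get_skip_pos() returns the index of an existing (stream, flags) entry, or nskips when there is none, so a later abandoned chunk on the same stream overwrites the earlier slot; each chunk thus carries at most one entry per (stream, U-bit) pair, capped at 10. A small walk-through sketch under those assumptions:

/* Sketch: skip-list dedup in sctp_generate_iftsn() (illustrative). */
static void example_skip_dedup(void)
{
	struct sctp_ifwdtsn_skip arr[10];
	int nskips = 0, pos;

	/* first abandoned chunk on stream 2: no match, appended at 0 */
	pos = sctp_get_skip_pos(arr, nskips, htons(2), 0);	/* -> 0 */
	arr[pos] = (struct sctp_ifwdtsn_skip){ .stream = htons(2),
					       .mid = htonl(5) };
	if (pos == nskips)
		nskips++;

	/* later chunk, same stream and flags: index 0 matches, so the
	 * entry is overwritten and ends up holding the highest MID (7)
	 */
	pos = sctp_get_skip_pos(arr, nskips, htons(2), 0);	/* -> 0 */
	arr[pos].mid = htonl(7);
	if (pos == nskips)
		nskips++;	/* not taken: nskips stays 1 */
}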
......@@ -242,7 +242,8 @@ int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
-if (!list_is_last(&ch->frag_list, &ch->msg->chunks)) {
+if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
+!q->asoc->intl_enable) {
struct sctp_stream_out *sout;
__u16 sid;
......
......@@ -288,6 +288,13 @@ static struct ctl_table sctp_net_table[] = {
.mode = 0644,
.proc_handler = proc_sctp_do_auth,
},
{
.procname = "intl_enable",
.data = &init_net.sctp.intl_enable,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "addr_scope_policy",
.data = &init_net.sctp.scope_policy,
......