Commit 8af9f729 authored by David S. Miller

Merge branch 'sctp-skb-list'

David Miller says:

====================
SCTP: Event skb list overhaul.

This patch series eliminates the explicit reference to the skb list
implementation via skb->prev dereferences.

The approach used is to pass a non-empty skb list around instead of an
event skb object which may or may not be on a list.

I'd like to thank Marcelo Leitner, Xin Long, and Neil Horman for
reviewing previous versions of this series.

Testing would be very much appreciated, in addition to the review of
course.

v4 --> v5: Rebase to net-next

v3 --> v4: Fix the logic in patch #4 so that we don't miss cases
           where we should add event to the on-stack temp list.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9994677c 013b96ec
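
For readers skimming the diff below: the pattern being removed stashed a back-pointer to the containing list in skb->prev, which leaks the sk_buff list implementation into SCTP. The replacement passes an explicit, always-non-empty sk_buff_head. Here is a minimal sketch of the before/after calling convention (deliver_old, deliver_new, and deliver_one are hypothetical names for illustration; a kernel build context with the usual net/sctp headers is assumed — this is not code from the series):

/* Before: a lone event skb might or might not be on a list; the callee
 * recovered the owning list, if any, by casting skb->prev.
 */
static void deliver_old(struct sk_buff_head *rxq, struct sctp_ulpevent *event)
{
	struct sk_buff *skb = sctp_event2skb(event);
	struct sk_buff_head *list = (struct sk_buff_head *)skb->prev;

	if (list)			/* several harvested skbs */
		skb_queue_splice_tail_init(list, rxq);
	else				/* a single, unlisted skb */
		__skb_queue_tail(rxq, skb);
}

/* After: callers always pass a non-empty list, so the callee peeks the
 * head skb for the event metadata and splices the whole list over,
 * never touching skb->prev directly.
 */
static void deliver_new(struct sk_buff_head *rxq, struct sk_buff_head *skb_list)
{
	struct sctp_ulpevent *event = sctp_skb2event(__skb_peek(skb_list));

	pr_debug("delivering event, msg_flags=0x%x\n", event->msg_flags);
	skb_queue_splice_tail_init(skb_list, rxq);
}

/* Single events get wrapped in a one-element on-stack list, the same
 * way the patches below do it:
 */
static void deliver_one(struct sk_buff_head *rxq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	deliver_new(rxq, &temp);
}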
@@ -59,7 +59,7 @@ void sctp_ulpq_free(struct sctp_ulpq *);
 int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);

 /* Add a new event for propagation to the ULP. */
-int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);

 /* Renege previously received chunks. */
 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
@@ -484,14 +484,15 @@ static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
 }

 static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
-			      struct sctp_ulpevent *event)
+			      struct sk_buff_head *skb_list)
 {
-	struct sk_buff *skb = sctp_event2skb(event);
 	struct sock *sk = ulpq->asoc->base.sk;
 	struct sctp_sock *sp = sctp_sk(sk);
-	struct sk_buff_head *skb_list;
+	struct sctp_ulpevent *event;
+	struct sk_buff *skb;

-	skb_list = (struct sk_buff_head *)skb->prev;
+	skb = __skb_peek(skb_list);
+	event = sctp_skb2event(skb);

 	if (sk->sk_shutdown & RCV_SHUTDOWN &&
 	    (sk->sk_shutdown & SEND_SHUTDOWN ||
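
Two details make the peek-and-convert at the top of the new sctp_enqueue_event() safe, sketched here (my_skb2event and peek_pattern are hypothetical stand-ins; kernel context assumed): the sctp_ulpevent is laid out inside the skb's control buffer, so the conversion is pointer arithmetic rather than a lookup, and __skb_peek() returns the head skb without unlinking it and without an empty-list check — which is exactly why the cover letter insists the list passed around is never empty.

/* Paraphrase of the skb <-> event relationship; my_skb2event mirrors
 * what the real sctp_skb2event() helper does.
 */
static inline struct sctp_ulpevent *my_skb2event(struct sk_buff *skb)
{
	return (struct sctp_ulpevent *)skb->cb;	/* event lives in cb[] */
}

static void peek_pattern(struct sk_buff_head *skb_list)
{
	struct sk_buff *skb = __skb_peek(skb_list);	/* no unlink, no empty check */
	struct sctp_ulpevent *event = my_skb2event(skb);

	pr_debug("peeked event, msg_flags=0x%x\n", event->msg_flags);
}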
@@ -858,19 +859,24 @@ static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,

 	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
 		event = sctp_intl_reasm(ulpq, event);
-		if (event && event->msg_flags & MSG_EOR) {
+		if (event) {
 			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));

-			event = sctp_intl_order(ulpq, event);
+			if (event->msg_flags & MSG_EOR)
+				event = sctp_intl_order(ulpq, event);
 		}
 	} else {
 		event = sctp_intl_reasm_uo(ulpq, event);
+		if (event) {
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
+		}
 	}

 	if (event) {
 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
-		sctp_enqueue_event(ulpq, event);
+		sctp_enqueue_event(ulpq, &temp);
 	}

 	return event_eor;
@@ -944,20 +950,27 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
+	struct sk_buff_head temp;

 	if (!skb_queue_empty(&ulpq->reasm)) {
 		do {
 			event = sctp_intl_retrieve_first(ulpq);
-			if (event)
-				sctp_enqueue_event(ulpq, event);
+			if (event) {
+				skb_queue_head_init(&temp);
+				__skb_queue_tail(&temp, sctp_event2skb(event));
+				sctp_enqueue_event(ulpq, &temp);
+			}
 		} while (event);
 	}

 	if (!skb_queue_empty(&ulpq->reasm_uo)) {
 		do {
 			event = sctp_intl_retrieve_first_uo(ulpq);
-			if (event)
-				sctp_enqueue_event(ulpq, event);
+			if (event) {
+				skb_queue_head_init(&temp);
+				__skb_queue_tail(&temp, sctp_event2skb(event));
+				sctp_enqueue_event(ulpq, &temp);
+			}
 		} while (event);
 	}
 }
@@ -1059,7 +1072,7 @@ static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)

 	if (event) {
 		sctp_intl_retrieve_ordered(ulpq, event);
-		sctp_enqueue_event(ulpq, event);
+		sctp_enqueue_event(ulpq, &temp);
 	}
 }
@@ -1298,6 +1311,15 @@ static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
 		       ntohl(skip->mid), skip->flags);
 }

+static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+{
+	struct sk_buff_head temp;
+
+	skb_queue_head_init(&temp);
+	__skb_queue_tail(&temp, sctp_event2skb(event));
+	return sctp_ulpq_tail_event(ulpq, &temp);
+}
+
 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
 	.data_chunk_len		= sizeof(struct sctp_data_chunk),
 	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
@@ -1306,7 +1328,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
 	.assign_number		= sctp_chunk_assign_ssn,
 	.validate_data		= sctp_validate_data,
 	.ulpevent_data		= sctp_ulpq_tail_data,
-	.enqueue_event		= sctp_ulpq_tail_event,
+	.enqueue_event		= do_ulpq_tail_event,
 	.renege_events		= sctp_ulpq_renege,
 	.start_pd		= sctp_ulpq_partial_delivery,
 	.abort_pd		= sctp_ulpq_abort_pd,
@@ -1317,6 +1339,16 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
 	.handle_ftsn		= sctp_handle_fwdtsn,
 };

+static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
+				 struct sctp_ulpevent *event)
+{
+	struct sk_buff_head temp;
+
+	skb_queue_head_init(&temp);
+	__skb_queue_tail(&temp, sctp_event2skb(event));
+	return sctp_enqueue_event(ulpq, &temp);
+}
+
 static struct sctp_stream_interleave sctp_stream_interleave_1 = {
 	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
 	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
@@ -1325,7 +1357,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
 	.assign_number		= sctp_chunk_assign_mid,
 	.validate_data		= sctp_validate_idata,
 	.ulpevent_data		= sctp_ulpevent_idata,
-	.enqueue_event		= sctp_enqueue_event,
+	.enqueue_event		= do_sctp_enqueue_event,
 	.renege_events		= sctp_renege_events,
 	.start_pd		= sctp_intl_start_pd,
 	.abort_pd		= sctp_intl_abort_pd,
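
Note the shape of the two new do_*() wrappers above: the .enqueue_event callback in struct sctp_stream_interleave keeps its old single-event signature, so each wrapper builds the one-element on-stack list and then calls the now list-based core function. That preserves the "list is never empty" invariant at every call site without changing the type of the ops table member.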
@@ -116,11 +116,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	event = sctp_ulpq_reasm(ulpq, event);

 	/* Do ordering if needed. */
-	if ((event) && (event->msg_flags & MSG_EOR)) {
+	if (event) {
 		/* Create a temporary list to collect chunks on. */
 		skb_queue_head_init(&temp);
 		__skb_queue_tail(&temp, sctp_event2skb(event));

-		event = sctp_ulpq_order(ulpq, event);
+		if (event->msg_flags & MSG_EOR)
+			event = sctp_ulpq_order(ulpq, event);
 	}

@@ -129,7 +130,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	 */
 	if (event) {
 		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 	}

 	return event_eor;
@@ -193,18 +194,17 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }

-/* If the SKB of 'event' is on a list, it is the first such member
- * of that list.
- */
-int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
 	struct sctp_sock *sp = sctp_sk(sk);
-	struct sk_buff_head *queue, *skb_list;
-	struct sk_buff *skb = sctp_event2skb(event);
+	struct sctp_ulpevent *event;
+	struct sk_buff_head *queue;
+	struct sk_buff *skb;
 	int clear_pd = 0;

-	skb_list = (struct sk_buff_head *) skb->prev;
+	skb = __skb_peek(skb_list);
+	event = sctp_skb2event(skb);

 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
@@ -257,13 +257,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		}
 	}

-	/* If we are harvesting multiple skbs they will be
-	 * collected on a list.
-	 */
-	if (skb_list)
-		skb_queue_splice_tail_init(skb_list, queue);
-	else
-		__skb_queue_tail(queue, skb);
+	skb_queue_splice_tail_init(skb_list, queue);

 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
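
With every caller now handing over a real list, the delivery tail above collapses to a single skb_queue_splice_tail_init(), which moves all skbs from the source list to the tail of the destination in constant time and leaves the source re-initialized (empty). A minimal usage sketch (splice_demo is a hypothetical name; kernel context assumed):

static void splice_demo(struct sk_buff_head *rxq, struct sk_buff *skb)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, skb);

	/* Joins temp's skbs at the tail of rxq and reinits temp. */
	skb_queue_splice_tail_init(&temp, rxq);
	WARN_ON(!skb_queue_empty(&temp));
}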
@@ -738,25 +732,25 @@ void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 {
 	struct sctp_ulpevent *event = NULL;
-	struct sk_buff_head temp;

 	if (skb_queue_empty(&ulpq->reasm))
 		return;

 	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
-		/* Do ordering if needed. */
-		if ((event) && (event->msg_flags & MSG_EOR)) {
-			skb_queue_head_init(&temp);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
+		struct sk_buff_head temp;

-			event = sctp_ulpq_order(ulpq, event);
-		}
+		skb_queue_head_init(&temp);
+		__skb_queue_tail(&temp, sctp_event2skb(event));
+
+		/* Do ordering if needed. */
+		if (event->msg_flags & MSG_EOR)
+			event = sctp_ulpq_order(ulpq, event);

 		/* Send event to the ULP.  'event' is the
 		 * sctp_ulpevent for very first SKB on the temp' list.
 		 */
 		if (event)
-			sctp_ulpq_tail_event(ulpq, event);
+			sctp_ulpq_tail_event(ulpq, &temp);
 	}
 }
@@ -956,7 +950,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)

 	if (event) {
 		/* see if we have more ordered that we can deliver */
 		sctp_ulpq_retrieve_ordered(ulpq, event);
-		sctp_ulpq_tail_event(ulpq, event);
+		sctp_ulpq_tail_event(ulpq, &temp);
 	}
 }
@@ -1082,7 +1076,11 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 		event = sctp_ulpq_retrieve_first(ulpq);

 		/* Send event to the ULP. */
 		if (event) {
-			sctp_ulpq_tail_event(ulpq, event);
+			struct sk_buff_head temp;
+
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
+			sctp_ulpq_tail_event(ulpq, &temp);
 			sctp_ulpq_set_pd(ulpq);
 			return;
 		}