Commit a8305bff authored by David S. Miller

net: Add and use skb_mark_not_on_list().

An SKB is not on a list if skb->next is NULL.

Codify this convention into a helper function and use it
where we are dequeueing an SKB and need to mark it as such.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 776f07ee
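
For illustration only (not part of the commit): a minimal, self-contained C sketch of the convention the patch codifies and of the dequeue-and-mark pattern it converts throughout the tree. The stripped-down sk_buff and the dequeue_head() queue below are hypothetical stand-ins for the kernel structures.

/*
 * Minimal userspace sketch of the skb->next == NULL convention.
 * "sk_buff" here is a simplified stand-in and dequeue_head() is a
 * hypothetical example queue, not kernel code.
 */
#include <stdio.h>

struct sk_buff {
	struct sk_buff *next;	/* NULL when the skb is not on a list */
	int id;
};

/* Mirrors the new helper: mark an skb as detached from any list. */
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

/* Typical pattern the patch converts: pop the head, then mark it. */
static struct sk_buff *dequeue_head(struct sk_buff **head)
{
	struct sk_buff *skb = *head;

	if (skb) {
		*head = skb->next;
		skb_mark_not_on_list(skb);
	}
	return skb;
}

int main(void)
{
	struct sk_buff b = { .next = NULL, .id = 2 };
	struct sk_buff a = { .next = &b,   .id = 1 };
	struct sk_buff *head = &a;

	while (head) {
		struct sk_buff *skb = dequeue_head(&head);

		printf("dequeued skb %d, still on list: %s\n",
		       skb->id, skb->next ? "yes" : "no");
	}
	return 0;
}
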
@@ -1339,6 +1339,11 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
 	}
 }
 
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+	skb->next = NULL;
+}
+
 /**
  *	skb_queue_empty - check if a queue is empty
  *	@list: queue head
...
@@ -3231,7 +3231,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
 	while (skb) {
 		struct sk_buff *next = skb->next;
 
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 		rc = xmit_one(skb, dev, txq, next != NULL);
 		if (unlikely(!dev_xmit_complete(rc))) {
 			skb->next = next;
@@ -3331,7 +3331,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 	for (; skb != NULL; skb = next) {
 		next = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 
 		/* in case skb wont be segmented, point to itself */
 		skb->prev = skb;
@@ -5296,7 +5296,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
 			return;
 		list_del(&skb->list);
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 		napi_gro_complete(skb);
 		napi->gro_hash[index].count--;
 	}
@@ -5482,7 +5482,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	if (pp) {
 		list_del(&pp->list);
-		pp->next = NULL;
+		skb_mark_not_on_list(pp);
 		napi_gro_complete(pp);
 		napi->gro_hash[hash].count--;
 	}
...
...@@ -2332,7 +2332,7 @@ static void __release_sock(struct sock *sk) ...@@ -2332,7 +2332,7 @@ static void __release_sock(struct sock *sk)
next = skb->next; next = skb->next;
prefetch(next); prefetch(next);
WARN_ON_ONCE(skb_dst_is_noref(skb)); WARN_ON_ONCE(skb_dst_is_noref(skb));
skb->next = NULL; skb_mark_not_on_list(skb);
sk_backlog_rcv(sk, skb); sk_backlog_rcv(sk, skb);
cond_resched(); cond_resched();
......
@@ -260,7 +260,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
 	}
 	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
-	head->next = NULL;
+	skb_mark_not_on_list(head);
 	head->dev = ldev;
 	head->tstamp = fq->q.stamp;
...
@@ -623,7 +623,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	sub_frag_mem_limit(qp->q.net, head->truesize);
 
 	*nextp = NULL;
-	head->next = NULL;
+	skb_mark_not_on_list(head);
 	head->prev = NULL;
 	head->dev = dev;
 	head->tstamp = qp->q.stamp;
...
@@ -535,7 +535,7 @@ static void ip_sublist_rcv_finish(struct list_head *head)
 		/* Handle ip{6}_forward case, as sch_direct_xmit have
 		 * another kind of SKB-list usage (see validate_xmit_skb_list)
 		 */
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 		dst_input(skb);
 	}
 }
...
@@ -278,7 +278,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 		struct sk_buff *nskb = segs->next;
 		int err;
 
-		segs->next = NULL;
+		skb_mark_not_on_list(segs);
 		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
 
 		if (err && ret == 0)
@@ -684,7 +684,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 		skb = frag;
 		frag = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 	}
 
 	if (err == 0) {
...
@@ -727,7 +727,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 			skb = frag;
 			frag = skb->next;
-			skb->next = NULL;
+			skb_mark_not_on_list(skb);
 		}
 
 		kfree(tmp_hdr);
...
@@ -449,7 +449,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
 	sub_frag_mem_limit(fq->q.net, head->truesize);
 
 	head->ignore_df = 1;
-	head->next = NULL;
+	skb_mark_not_on_list(head);
 	head->dev = dev;
 	head->tstamp = fq->q.stamp;
 	ipv6_hdr(head)->payload_len = htons(payload_len);
...
@@ -388,7 +388,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	}
 	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
-	head->next = NULL;
+	skb_mark_not_on_list(head);
 	head->dev = dev;
 	head->tstamp = fq->q.stamp;
 	ipv6_hdr(head)->payload_len = htons(payload_len);
...
@@ -764,7 +764,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
 		return ret;
 	}
 
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 	entry_seg = nf_queue_entry_dup(entry);
 	if (entry_seg) {
...
@@ -259,7 +259,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 	while (list) {
 		skb = list;
 		list = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
 	}
 }
...
@@ -812,7 +812,7 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow)
 
 	if (skb) {
 		flow->head = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 	}
 
 	return skb;
@@ -1252,7 +1252,7 @@ static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
 		else
 			flow->head = elig_ack->next;
 
-		elig_ack->next = NULL;
+		skb_mark_not_on_list(elig_ack);
 		return elig_ack;
 	}
@@ -1675,7 +1675,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		while (segs) {
 			nskb = segs->next;
-			segs->next = NULL;
+			skb_mark_not_on_list(segs);
 			qdisc_skb_cb(segs)->pkt_len = segs->len;
 			cobalt_set_enqueue_time(segs, now);
 			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
...
@@ -319,7 +319,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
 
 	if (skb) {
 		flow->head = skb->next;
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 		flow->qlen--;
 		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
...
@@ -124,7 +124,7 @@ static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
 	struct sk_buff *skb = flow->head;
 
 	flow->head = skb->next;
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 	return skb;
 }
...
@@ -184,7 +184,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
 		skb = nskb;
 		(*packets)++; /* GSO counts as one pkt */
 	}
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* This variant of try_bulk_dequeue_skb() makes sure
@@ -210,7 +210,7 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 		skb = nskb;
 	} while (++cnt < 8);
 	(*packets) += cnt;
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
...
@@ -330,7 +330,7 @@ static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
 	struct sk_buff *skb = bucket->head;
 
 	bucket->head = skb->next;
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 	return skb;
 }
...
@@ -568,7 +568,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (segs) {
 		while (segs) {
 			skb2 = segs->next;
-			segs->next = NULL;
+			skb_mark_not_on_list(segs);
 			qdisc_skb_cb(segs)->pkt_len = segs->len;
 			last_len = segs->len;
 			rc = qdisc_enqueue(segs, sch, to_free);
...
@@ -162,7 +162,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 	nb = 0;
 	while (segs) {
 		nskb = segs->next;
-		segs->next = NULL;
+		skb_mark_not_on_list(segs);
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		len += segs->len;
 		ret = qdisc_enqueue(segs, q->qdisc, to_free);
...
@@ -577,7 +577,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
 		rcu_dereference_rtnl(orig_dev->tipc_ptr);
 	if (likely(b && test_bit(0, &b->up) &&
 		   (skb->pkt_type <= PACKET_MULTICAST))) {
-		skb->next = NULL;
+		skb_mark_not_on_list(skb);
 		tipc_rcv(dev_net(b->pt.dev), skb, b);
 		rcu_read_unlock();
 		return NET_RX_SUCCESS;
...
@@ -99,7 +99,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
 	do {
 		struct sk_buff *nskb = skb2->next;
 
-		skb2->next = NULL;
+		skb_mark_not_on_list(skb2);
 		xo = xfrm_offload(skb2);
 		xo->flags |= XFRM_DEV_RESUME;
...
@@ -189,7 +189,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
 		struct sk_buff *nskb = segs->next;
 		int err;
 
-		segs->next = NULL;
+		skb_mark_not_on_list(segs);
 		err = xfrm_output2(net, sk, segs);
 
 		if (unlikely(err)) {
...