Commit d7d16a89 authored by Eric Dumazet, committed by David S. Miller

net: add skb_queue_empty_lockless()

Some paths call skb_queue_empty() without holding
the queue lock. We must use a barrier to keep the
compiler from doing strange things and to avoid
KCSAN splats.

Adding a barrier in skb_queue_empty() might be overkill,
so I prefer adding a new helper to clearly identify
the points where callers might be lockless. This might
help us find real bugs.

The corresponding WRITE_ONCE() should add zero cost
for current compilers.
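
For illustration only (a hypothetical caller, not part of this patch): a reader that peeks at a socket receive queue without taking the queue lock would use the new helper, so that its READ_ONCE() pairs with the WRITE_ONCE() stores done under the lock in __skb_insert(), __skb_queue_splice() and __skb_unlink():

	/* Hypothetical sketch; assumes <linux/skbuff.h> and <net/sock.h>.
	 * Checks sk->sk_receive_queue without holding its lock: the
	 * READ_ONCE() inside skb_queue_empty_lockless() pairs with the
	 * WRITE_ONCE() stores made by writers updating the list.
	 */
	static inline bool sock_has_data_lockless(const struct sock *sk)
	{
		return !skb_queue_empty_lockless(&sk->sk_receive_queue);
	}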
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fc11078d
@@ -1495,6 +1495,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 	return list->next == (const struct sk_buff *) list;
 }
 
+/**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+	return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
 /**
  * skb_queue_is_last - check if skb is the last entry in the queue
  * @list: queue head
@@ -1848,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
 {
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+	/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+	WRITE_ONCE(newsk->next, next);
+	WRITE_ONCE(newsk->prev, prev);
+	WRITE_ONCE(next->prev, newsk);
+	WRITE_ONCE(prev->next, newsk);
 	list->qlen++;
 }
@@ -1861,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
 	struct sk_buff *first = list->next;
 	struct sk_buff *last = list->prev;
 
-	first->prev = prev;
-	prev->next = first;
+	WRITE_ONCE(first->prev, prev);
+	WRITE_ONCE(prev->next, first);
 
-	last->next = next;
-	next->prev = last;
+	WRITE_ONCE(last->next, next);
+	WRITE_ONCE(next->prev, last);
 }
@@ -2006,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next = skb->next;
 	prev = skb->prev;
 	skb->next = skb->prev = NULL;
-	next->prev = prev;
-	prev->next = next;
+	WRITE_ONCE(next->prev, prev);
+	WRITE_ONCE(prev->next, next);
 }
 
 /**