Commit c420c989 authored by Matteo Croce, committed by David S. Miller

skbuff: add a parameter to __skb_frag_unref

This is a prerequisite patch, the next one is enabling recycling of
skbs and fragments. Add an extra argument on __skb_frag_unref() to
handle recycling, and update the current users of the function with that.
Signed-off-by: Matteo Croce <mcroce@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c07aea3e
...@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, ...@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
if (length == 0) { if (length == 0) {
/* don't need this page */ /* don't need this page */
__skb_frag_unref(frag); __skb_frag_unref(frag, false);
--skb_shinfo(skb)->nr_frags; --skb_shinfo(skb)->nr_frags;
} else { } else {
size = min(length, (unsigned) PAGE_SIZE); size = min(length, (unsigned) PAGE_SIZE);
......
...@@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, ...@@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
fail: fail:
while (nr > 0) { while (nr > 0) {
nr--; nr--;
__skb_frag_unref(skb_shinfo(skb)->frags + nr); __skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
} }
return 0; return 0;
} }
......
...@@ -3081,10 +3081,12 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) ...@@ -3081,10 +3081,12 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
/** /**
* __skb_frag_unref - release a reference on a paged fragment. * __skb_frag_unref - release a reference on a paged fragment.
* @frag: the paged fragment * @frag: the paged fragment
* @recycle: recycle the page if allocated via page_pool
* *
* Releases a reference on the paged fragment @frag. * Releases a reference on the paged fragment @frag
* or recycles the page via the page_pool API.
*/ */
static inline void __skb_frag_unref(skb_frag_t *frag) static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{ {
put_page(skb_frag_page(frag)); put_page(skb_frag_page(frag));
} }
...@@ -3098,7 +3100,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag) ...@@ -3098,7 +3100,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag)
*/ */
static inline void skb_frag_unref(struct sk_buff *skb, int f) static inline void skb_frag_unref(struct sk_buff *skb, int f)
{ {
__skb_frag_unref(&skb_shinfo(skb)->frags[f]); __skb_frag_unref(&skb_shinfo(skb)->frags[f], false);
} }
/** /**
......
...@@ -664,7 +664,7 @@ static void skb_release_data(struct sk_buff *skb) ...@@ -664,7 +664,7 @@ static void skb_release_data(struct sk_buff *skb)
skb_zcopy_clear(skb, true); skb_zcopy_clear(skb, true);
for (i = 0; i < shinfo->nr_frags; i++) for (i = 0; i < shinfo->nr_frags; i++)
__skb_frag_unref(&shinfo->frags[i]); __skb_frag_unref(&shinfo->frags[i], false);
if (shinfo->frag_list) if (shinfo->frag_list)
kfree_skb_list(shinfo->frag_list); kfree_skb_list(shinfo->frag_list);
...@@ -3495,7 +3495,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) ...@@ -3495,7 +3495,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragto = &skb_shinfo(tgt)->frags[merge]; fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, skb_frag_size(fragfrom)); skb_frag_size_add(fragto, skb_frag_size(fragfrom));
__skb_frag_unref(fragfrom); __skb_frag_unref(fragfrom, false);
} }
/* Reposition in the original skb */ /* Reposition in the original skb */
......
...@@ -128,7 +128,7 @@ static void destroy_record(struct tls_record_info *record) ...@@ -128,7 +128,7 @@ static void destroy_record(struct tls_record_info *record)
int i; int i;
for (i = 0; i < record->num_frags; i++) for (i = 0; i < record->num_frags; i++)
__skb_frag_unref(&record->frags[i]); __skb_frag_unref(&record->frags[i], false);
kfree(record); kfree(record);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment