Commit 2638595a authored by Elena Reshetova, committed by David S. Miller

net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t

The refcount_t type and its corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This helps avoid accidental
refcounter overflows that might lead to use-after-free
situations.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 63354797
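For context, here is a minimal sketch of the conversion pattern applied in the hunks below. The struct and helper functions are hypothetical (not part of this patch); only the refcount_t API calls are the real kernel interface. The point of the conversion is that refcount_t saturates instead of wrapping, so an over-increment cannot bring the counter back to zero and cause a premature free.

/*
 * Hypothetical example of converting an atomic_t refcount to refcount_t.
 * refcount_inc() WARNs on increment-from-zero and saturates near the
 * maximum instead of wrapping, unlike atomic_inc().
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;			/* was: atomic_t refs */
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (f)
		refcount_set(&f->refs, 1);	/* was: atomic_set(&f->refs, 1) */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refs);			/* was: atomic_inc(&f->refs) */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))	/* was: atomic_dec_and_test() */
		kfree(f);
}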
@@ -915,7 +915,7 @@ struct sk_buff_fclones {
 	struct sk_buff	skb2;
-	atomic_t	fclone_ref;
+	refcount_t	fclone_ref;
 };

 /**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
 	fclones = container_of(skb, struct sk_buff_fclones, skb1);

 	return skb->fclone == SKB_FCLONE_ORIG &&
-	       atomic_read(&fclones->fclone_ref) > 1 &&
+	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
 }
...
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);

 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);

 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		 * This test would have no chance to be true for the clone,
 		 * while here, branch prediction will be good.
 		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
 			goto fastpath;
 		break;
@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 		break;
 	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 			return NULL;

 		if (skb->fclone == SKB_FCLONE_ORIG &&
-		    atomic_read(&fclones->fclone_ref) == 1) {
+		    refcount_read(&fclones->fclone_ref) == 1) {
 			n = &fclones->skb2;
-			atomic_set(&fclones->fclone_ref, 2);
+			refcount_set(&fclones->fclone_ref, 2);
 		} else {
 			if (skb_pfmemalloc(skb))
 				gfp_mask |= __GFP_MEMALLOC;
...