Commit 1cc9a98b authored by Elena Reshetova, committed by David S. Miller

net: convert inet_peer.refcnt from atomic_t to refcount_t

refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This helps avoid accidental
refcounter overflows that might lead to use-after-free
situations.
This conversion requires an overall +1 shift of the
whole refcounting scheme: refcount_t treats 0, not -1,
as the "dead" state, so every lifecycle state moves up
by one.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 574a6020
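
Because refcount_t reserves 0 as its terminal "dead" value (refcount_inc_not_zero() will never revive a zero count), every state in the inet_peer lifecycle shifts up by one. As a reading aid for the diff below (a summary, not part of the patch text): "queued for deletion" moves from refcnt == -1 to refcnt == 0, "in tree with no outside users" moves from 0 to 1, and a freshly created entry now starts at 2 (one reference held by the AVL tree, one by the caller) instead of 1.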
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -46,7 +46,7 @@ struct inet_peer {
 		struct rcu_head		gc_rcu;
 	};
 	/*
-	 * Once inet_peer is queued for deletion (refcnt == -1), following field
+	 * Once inet_peer is queued for deletion (refcnt == 0), following field
 	 * is not available: rid
 	 * We can share memory with rcu_head to help keep inet_peer small.
 	 */
@@ -60,7 +60,7 @@ struct inet_peer {
 
 	/* following fields might be frequently dirtied */
 	__u32			dtime;	/* the time of last use of not referenced entries */
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 };
 
 struct inet_peer_base {
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -115,7 +115,7 @@ static void inetpeer_gc_worker(struct work_struct *work)
 
 		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
 
-		if (!atomic_read(&p->refcnt)) {
+		if (refcount_read(&p->refcnt) == 1) {
 			list_del(&p->gc_list);
 			kmem_cache_free(peer_cachep, p);
 		}
@@ -202,10 +202,11 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
-			 * deleted (refcnt=-1)
+			 * deleted (refcnt=0)
 			 */
-			if (!atomic_add_unless(&u->refcnt, 1, -1))
+			if (!refcount_inc_not_zero(&u->refcnt)) {
 				u = NULL;
+			}
 			return u;
 		}
 		if (cmp == -1)
@@ -382,11 +383,10 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	while (stackptr > stack) {
 		stackptr--;
 		p = rcu_deref_locked(**stackptr, base);
-		if (atomic_read(&p->refcnt) == 0) {
+		if (refcount_read(&p->refcnt) == 1) {
 			smp_rmb();
 			delta = (__u32)jiffies - p->dtime;
-			if (delta >= ttl &&
-			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
+			if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
 				p->gc_next = gchead;
 				gchead = p;
 			}
@@ -432,7 +432,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 relookup:
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		refcount_inc(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
 		return p;
 	}
@@ -444,7 +444,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
 		p->daddr = *daddr;
-		atomic_set(&p->refcnt, 1);
+		refcount_set(&p->refcnt, 2);
 		atomic_set(&p->rid, 0);
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
@@ -468,7 +468,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	p->dtime = (__u32)jiffies;
 	smp_mb__before_atomic();
-	atomic_dec(&p->refcnt);
+	refcount_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
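
To make the shifted scheme concrete, here is a minimal userspace sketch (an illustration built on C11 atomics, not the kernel's refcount.h implementation) of the two primitives the patch leans on, refcount_inc_not_zero() and refcount_dec_if_one(), walking an entry through the new lifecycle:

/*
 * Userspace illustration only: emulates refcount_inc_not_zero() and
 * refcount_dec_if_one() and replays the inet_peer state transitions.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct refcount { atomic_int refs; };

/* Take a reference unless the object is already dead (refs == 0). */
static bool refcount_inc_not_zero(struct refcount *r)
{
	int old = atomic_load(&r->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&r->refs, &old, old + 1))
			return true;
		/* a failed CAS reloaded 'old'; retry */
	}
	return false;
}

/* Drop the last reference only if we are the sole holder (1 -> 0). */
static bool refcount_dec_if_one(struct refcount *r)
{
	int one = 1;

	return atomic_compare_exchange_strong(&r->refs, &one, 0);
}

int main(void)
{
	struct refcount refcnt;

	/* inet_getpeer() creating a fresh entry: one reference for the
	 * tree plus one for the caller -- hence refcount_set(..., 2). */
	atomic_store(&refcnt.refs, 2);

	/* inet_putpeer(): the caller drops its reference; the tree's
	 * reference keeps refs at 1 ("in tree, no users"). */
	atomic_fetch_sub(&refcnt.refs, 1);

	/* inet_peer_gc(): claim the tree's last reference, marking the
	 * entry dead (the role of the old atomic_cmpxchg(.., 0, -1)). */
	printf("gc claimed entry: %d\n", refcount_dec_if_one(&refcnt));

	/* lookup_rcu() racing after deletion: must fail, refs == 0. */
	printf("lookup got a ref: %d\n", refcount_inc_not_zero(&refcnt));
	return 0;
}

Under the old atomic_t scheme the same transitions ran 1 -> 0 -> -1, with the gc step open-coding the cmpxchg; pinning "dead" at 0 is what lets the real refcount_t saturate and warn on underflow or increment-from-zero instead of silently wrapping.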