From 1cc9a98b59ba92fece1277f76aa43e05f34936a6 Mon Sep 17 00:00:00 2001
From: "Reshetova, Elena"
Date: Fri, 30 Jun 2017 13:07:54 +0300
Subject: [PATCH] net: convert inet_peer.refcnt from atomic_t to refcount_t

refcount_t type and corresponding API should be used
instead of atomic_t when the variable is used as
a reference counter. This allows to avoid accidental
refcounter overflows that might lead to use-after-free
situations.

This conversion requires overall +1 on the whole
refcounting scheme.

Signed-off-by: Elena Reshetova
Signed-off-by: Hans Liljestrand
Signed-off-by: Kees Cook
Signed-off-by: David Windsor
Signed-off-by: David S. Miller
---
 include/net/inetpeer.h |  4 ++--
 net/ipv4/inetpeer.c    | 18 +++++++++---------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 235c7811a86a..f2a215fc78e4 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -46,7 +46,7 @@ struct inet_peer {
 		struct rcu_head     gc_rcu;
 	};
 	/*
-	 * Once inet_peer is queued for deletion (refcnt == -1), following field
+	 * Once inet_peer is queued for deletion (refcnt == 0), following field
 	 * is not available: rid
 	 * We can share memory with rcu_head to help keep inet_peer small.
 	 */
@@ -60,7 +60,7 @@ struct inet_peer {
 	/* following fields might be frequently dirtied */
 	__u32			dtime;	/* the time of last use of not
 					 * referenced entries */
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 };
 
 struct inet_peer_base {
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 86fa45809540..c5a117cc6619 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -115,7 +115,7 @@ static void inetpeer_gc_worker(struct work_struct *work)
 
 		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
 
-		if (!atomic_read(&p->refcnt)) {
+		if (refcount_read(&p->refcnt) == 1) {
 			list_del(&p->gc_list);
 			kmem_cache_free(peer_cachep, p);
 		}
@@ -202,10 +202,11 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
-			 * deleted (refcnt=-1)
+			 * deleted (refcnt=0)
 			 */
-			if (!atomic_add_unless(&u->refcnt, 1, -1))
+			if (!refcount_inc_not_zero(&u->refcnt)) {
 				u = NULL;
+			}
 			return u;
 		}
 		if (cmp == -1)
@@ -382,11 +383,10 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	while (stackptr > stack) {
 		stackptr--;
 		p = rcu_deref_locked(**stackptr, base);
-		if (atomic_read(&p->refcnt) == 0) {
+		if (refcount_read(&p->refcnt) == 1) {
 			smp_rmb();
 			delta = (__u32)jiffies - p->dtime;
-			if (delta >= ttl &&
-			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
+			if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
 				p->gc_next = gchead;
 				gchead = p;
 			}
@@ -432,7 +432,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 relookup:
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		refcount_inc(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
 		return p;
 	}
@@ -444,7 +444,7 @@ relookup:
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
 		p->daddr = *daddr;
-		atomic_set(&p->refcnt, 1);
+		refcount_set(&p->refcnt, 2);
 		atomic_set(&p->rid, 0);
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
@@ -468,7 +468,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	p->dtime = (__u32)jiffies;
 	smp_mb__before_atomic();
-	atomic_dec(&p->refcnt);
+	refcount_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
 
-- 
2.30.2
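
Editor's note on the "+1 on the whole refcounting scheme" remark: refcount_t
has no negative states and treats 0 as "dead", so every state of the old
atomic_t scheme shifts up by one (queued for deletion: -1 -> 0, held only by
the tree: 0 -> 1, freshly created with a caller reference: 1 -> 2). The
snippet below is a minimal userspace sketch of that shift built on C11
<stdatomic.h>; the helper names deliberately mirror the kernel's
refcount_inc_not_zero() and refcount_dec_if_one() used in this patch, but they
are simplified stand-ins for illustration, not the kernel implementation.

/* Userspace sketch of the shifted refcounting scheme (not kernel code). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint refs; } sketch_refcount_t;

/* Take a reference only if the object is not already dead (refs == 0);
 * replaces the old atomic_add_unless(&refcnt, 1, -1) idiom.
 */
static bool refcount_inc_not_zero(sketch_refcount_t *r)
{
	unsigned int old = atomic_load(&r->refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&r->refs, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already queued for deletion */
}

/* Atomically drop the last reference (1 -> 0); replaces the old
 * atomic_cmpxchg(&refcnt, 0, -1) "mark for deletion" idiom.
 */
static bool refcount_dec_if_one(sketch_refcount_t *r)
{
	unsigned int one = 1;

	return atomic_compare_exchange_strong(&r->refs, &one, 0);
}

int main(void)
{
	sketch_refcount_t peer;

	/* Old scheme: atomic_set(&refcnt, 1) meant "caller holds one ref".
	 * New scheme: 2 = one ref for the tree + one for the caller.
	 */
	atomic_init(&peer.refs, 2);

	printf("lookup: %d\n", refcount_inc_not_zero(&peer));	/* 1: 2 -> 3 */
	atomic_fetch_sub(&peer.refs, 1);			/* put: 3 -> 2 */
	atomic_fetch_sub(&peer.refs, 1);			/* put: 2 -> 1 */
	printf("gc:     %d\n", refcount_dec_if_one(&peer));	/* 1: 1 -> 0 */
	printf("lookup: %d\n", refcount_inc_not_zero(&peer));	/* 0: dead */
	return 0;
}

With this shift, inetpeer_gc_worker() and inet_peer_gc() test for "no user
holds a reference" with refcount_read() == 1 instead of == 0, which is why
those comparisons change alongside the type conversion.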