This is a note to let you know that I've just added the patch titled

    inetpeer: fix race in unused_list manipulations

to the 2.6.39-stable tree which can be found at:
        http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     inetpeer-fix-race-in-unused_list-manipulations.patch
and it can be found in the queue-2.6.39 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <[email protected]> know about it.

From 452af3cfcca516324a287a0d610c73d987c5d092 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <[email protected]>
Date: Thu, 26 May 2011 17:27:11 +0000
Subject: inetpeer: fix race in unused_list manipulations

From: Eric Dumazet <[email protected]>

[ Upstream commit 686a7e32ca7fdd819eb9606abd3db52b77d1479f ]

Several crashes in cleanup_once() were reported in recent kernels.

Commit d6cc1d642de9 (inetpeer: various changes) added a race in
unlink_from_unused().

One way to avoid taking unused_peers.lock before doing the list_empty()
test is to catch 0->1 refcnt transitions, using full barrier atomic
operations variants (atomic_cmpxchg() and atomic_inc_return()) instead
of previous atomic_inc() and atomic_add_unless() variants.

We then call unlink_from_unused() only for the owner of the 0->1
transition.
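
A minimal userspace sketch of that ownership rule (illustrative names only,
with C11 atomics standing in for the kernel's atomic_t): the caller whose
increment makes the refcount reach 1 owns the 0->1 transition, so it alone
removes the entry from the unused list, and the lockless list_empty() test
is no longer needed.

#include <stdatomic.h>
#include <stdio.h>

struct peer {
	atomic_int refcnt;	/* plays the role of inet_peer.refcnt */
	int on_unused_list;	/* stands in for the unused list linkage */
};

/* Take a reference; report whether we owned the 0->1 transition. */
static int take_ref(struct peer *p)
{
	/* atomic_fetch_add() is a full read-modify-write that returns the
	 * old value; old == 0 means the new refcount is 1, i.e. this caller
	 * did the 0->1 transition (the patch below uses atomic_inc_return()
	 * and checks for a new value of 1 instead).
	 */
	return atomic_fetch_add(&p->refcnt, 1) == 0;
}

int main(void)
{
	struct peer p = { .refcnt = 0, .on_unused_list = 1 };

	if (take_ref(&p))	/* 0->1: this caller unlinks from "unused" */
		p.on_unused_list = 0;
	if (take_ref(&p))	/* 1->2: no list manipulation needed */
		p.on_unused_list = 0;

	printf("refcnt=%d on_unused_list=%d\n",
	       atomic_load(&p.refcnt), p.on_unused_list);
	return 0;
}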

Add a new atomic_add_unless_return() static helper.

With help from Arun Sharma.
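
The helper's semantics can also be sketched in userspace C (again purely
illustrative, with atomic_compare_exchange_strong() standing in for the
kernel's atomic_cmpxchg()): add 'a' to the counter unless it currently
holds 'u', report whether the add happened, and hand back the new value.
lookup_rcu() passes u == -1 so that an entry already being freed is never
resurrected, and a new value of 1 identifies the owner of the 0->1
transition.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Same shape as the helper added below: add 'a' to '*ptr' unless it
 * currently holds 'u'.  Returns true if the add happened and stores the
 * new value in *newv so the caller can spot the 0->1 transition.
 */
static bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
{
	int old = atomic_load(ptr);

	while (old != u) {
		*newv = old + a;
		/* On failure, 'old' is reloaded with the current value and
		 * the loop retries, like the atomic_cmpxchg() loop below.
		 */
		if (atomic_compare_exchange_strong(ptr, &old, *newv))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int live = 0, freed = -1;
	int newv = 0;
	bool added;

	/* Unused but live entry: the add succeeds and newv == 1 tells the
	 * caller it owns the unlink_from_unused() work.
	 */
	added = add_unless_return(&live, 1, -1, &newv);
	printf("live:  added=%d newv=%d\n", added, newv);

	/* Entry already marked freed (refcnt == -1): the add is refused. */
	added = add_unless_return(&freed, 1, -1, &newv);
	printf("freed: added=%d\n", added);
	return 0;
}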
Refs: https://bugzilla.kernel.org/show_bug.cgi?id=32772
Reported-by: Arun Sharma <[email protected]>
Reported-by: Maximilian Engelhardt <[email protected]>
Reported-by: Yann Dupont <[email protected]>
Reported-by: Denys Fedoryshchenko <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
net/ipv4/inetpeer.c | 42 +++++++++++++++++++++++++++---------------
1 file changed, 27 insertions(+), 15 deletions(-)
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
- if (!list_empty(&p->unused)) {
- spin_lock_bh(&unused_peers.lock);
- list_del_init(&p->unused);
- spin_unlock_bh(&unused_peers.lock);
- }
+ spin_lock_bh(&unused_peers.lock);
+ list_del_init(&p->unused);
+ spin_unlock_bh(&unused_peers.lock);
}
static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct ine
u; \
})
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+ int cur, old = atomic_read(ptr);
+
+ while (old != u) {
+ *newv = old + a;
+ cur = atomic_cmpxchg(ptr, old, *newv);
+ if (cur == old)
+ return true;
+ old = cur;
+ }
+ return false;
+}
+
/*
* Called with rcu_read_lock()
* Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct ine
* We exit from this function if number of links exceeds PEER_MAXDEPTH
*/
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
- struct inet_peer_base *base)
+ struct inet_peer_base *base,
+ int *newrefcnt)
{
struct inet_peer *u = rcu_dereference(base->root);
int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(cons
* distinction between an unused entry (refcnt=0) and
* a freed one.
*/
- if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+ if (!atomic_add_unless_return(&u->refcnt, 1, -1,
newrefcnt))
u = NULL;
return u;
}
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct in
struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
unsigned int sequence;
- int invalidated;
+ int invalidated, newrefcnt = 0;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
*/
rcu_read_lock();
sequence = read_seqbegin(&base->lock);
- p = lookup_rcu(daddr, base);
+ p = lookup_rcu(daddr, base, &newrefcnt);
invalidated = read_seqretry(&base->lock, sequence);
rcu_read_unlock();
if (p) {
- /* The existing node has been found.
+found: /* The existing node has been found.
* Remove the entry from unused list if it was there.
*/
- unlink_from_unused(p);
+ if (newrefcnt == 1)
+ unlink_from_unused(p);
return p;
}
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct in
write_seqlock_bh(&base->lock);
p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
- atomic_inc(&p->refcnt);
+ newrefcnt = atomic_inc_return(&p->refcnt);
write_sequnlock_bh(&base->lock);
- /* Remove the entry from unused list if it was there. */
- unlink_from_unused(p);
- return p;
+ goto found;
}
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
Patches currently in stable-queue which might be from [email protected] are
queue-2.6.39/inetpeer-fix-race-in-unused_list-manipulations.patch
queue-2.6.39/af_packet-prevent-information-leak.patch
queue-2.6.39/bridge-provide-a-cow_metrics-method-for-fake_ops.patch
queue-2.6.39/inet_diag-fix-inet_diag_bc_audit.patch
_______________________________________________
stable mailing list
[email protected]
http://linux.kernel.org/mailman/listinfo/stable