4.13-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Manfred Spraul <manf...@colorfullife.com>

commit 3ef0c7a730de0bae03d86c19570af764fa3c4445 upstream.

As we want to remove spin_unlock_wait() and replace it with explicit
spin_lock()/spin_unlock() calls, we can use this to simplify the
locking.

In addition:
- Reading nf_conntrack_locks_all needs ACQUIRE memory ordering.
- The new code avoids the backwards loop.

Only slightly tested; I did not manage to trigger calls to
nf_conntrack_all_lock().

V2: With improved comments, to clearly show how the barriers
    pair.

Fixes: b16c29191dc8 ("netfilter: nf_conntrack: use safer way to lock all buckets")
Signed-off-by: Manfred Spraul <manf...@colorfullife.com>
Cc: Alan Stern <st...@rowland.harvard.edu>
Cc: Sasha Levin <sasha.le...@oracle.com>
Cc: Pablo Neira Ayuso <pa...@netfilter.org>
Cc: netfilter-de...@vger.kernel.org
Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>

---
 net/netfilter/nf_conntrack_core.c |   52 +++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 23 deletions(-)
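
Note for reviewers (ignored by git am): below is a minimal user-space sketch of
the locking pattern this patch introduces, assuming C11 atomics and POSIX
mutexes as stand-ins for the kernel's smp_load_acquire()/smp_store_release()
and spinlocks.  The function and variable names (bucket_lock(), all_lock_fn(),
locks_all, ...) are invented for the illustration; only the structure mirrors
nf_conntrack_lock()/nf_conntrack_all_lock()/nf_conntrack_all_unlock().

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_LOCKS 4

static pthread_mutex_t bucket[NR_LOCKS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool locks_all;   /* plays the role of nf_conntrack_locks_all */

static void bucket_lock(int i)
{
        /* 1) take the per-bucket lock */
        pthread_mutex_lock(&bucket[i]);

        /* 2) acquire-load of the global flag; pairs with the release
         * store in all_unlock_fn() below.
         */
        if (!atomic_load_explicit(&locks_all, memory_order_acquire))
                return;

        /* slow path: drop the bucket lock, queue up behind the holder
         * of all_lock, then retake the bucket lock.
         */
        pthread_mutex_unlock(&bucket[i]);
        pthread_mutex_lock(&all_lock);
        pthread_mutex_lock(&bucket[i]);
        pthread_mutex_unlock(&all_lock);
}

static void bucket_unlock(int i)
{
        pthread_mutex_unlock(&bucket[i]);
}

static void all_lock_fn(void)
{
        int i;

        pthread_mutex_lock(&all_lock);
        atomic_store_explicit(&locks_all, true, memory_order_relaxed);

        /* lock/unlock handshake instead of spin_unlock_wait(): once the
         * loop finishes, every earlier bucket-lock holder has left its
         * critical section, and every later locker sees locks_all == true
         * (the mutex unlock provides the needed release) and then blocks
         * on all_lock in the slow path above.
         */
        for (i = 0; i < NR_LOCKS; i++) {
                pthread_mutex_lock(&bucket[i]);
                pthread_mutex_unlock(&bucket[i]);
        }
}

static void all_unlock_fn(void)
{
        /* release store: pairs with the acquire load in bucket_lock() */
        atomic_store_explicit(&locks_all, false, memory_order_release);
        pthread_mutex_unlock(&all_lock);
}

int main(void)
{
        /* single-threaded walk through both paths, just to exercise them */
        bucket_lock(1);
        bucket_unlock(1);
        all_lock_fn();
        all_unlock_fn();
        puts("ok");
        return 0;
}

Building with "cc -pthread sketch.c" should suffice; the point is only to show
why the plain lock/unlock pair on each bucket can replace spin_unlock_wait()
plus the explicit smp_rmb()/smp_mb() barriers in the old code.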

--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -96,19 +96,26 @@ static struct conntrack_gc_work conntrac
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 {
+       /* 1) Acquire the lock */
        spin_lock(lock);
-       while (unlikely(nf_conntrack_locks_all)) {
-               spin_unlock(lock);
 
-               /*
-                * Order the 'nf_conntrack_locks_all' load vs. the
-                * spin_unlock_wait() loads below, to ensure
-                * that 'nf_conntrack_locks_all_lock' is indeed held:
-                */
-               smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-               spin_unlock_wait(&nf_conntrack_locks_all_lock);
-               spin_lock(lock);
-       }
+       /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+        * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+        */
+       if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+               return;
+
+       /* fast path failed, unlock */
+       spin_unlock(lock);
+
+       /* Slow path 1) get global lock */
+       spin_lock(&nf_conntrack_locks_all_lock);
+
+       /* Slow path 2) get the lock we want */
+       spin_lock(lock);
+
+       /* Slow path 3) release the global lock */
+       spin_unlock(&nf_conntrack_locks_all_lock);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
@@ -149,28 +156,27 @@ static void nf_conntrack_all_lock(void)
        int i;
 
        spin_lock(&nf_conntrack_locks_all_lock);
-       nf_conntrack_locks_all = true;
 
-       /*
-        * Order the above store of 'nf_conntrack_locks_all' against
-        * the spin_unlock_wait() loads below, such that if
-        * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
-        * we must observe nf_conntrack_locks[] held:
-        */
-       smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+       nf_conntrack_locks_all = true;
 
        for (i = 0; i < CONNTRACK_LOCKS; i++) {
-               spin_unlock_wait(&nf_conntrack_locks[i]);
+               spin_lock(&nf_conntrack_locks[i]);
+
+               /* This spin_unlock provides the "release" to ensure that
+                * nf_conntrack_locks_all==true is visible to everyone that
+                * acquired spin_lock(&nf_conntrack_locks[]).
+                */
+               spin_unlock(&nf_conntrack_locks[i]);
        }
 }
 
 static void nf_conntrack_all_unlock(void)
 {
-       /*
-        * All prior stores must be complete before we clear
+       /* All prior stores must be complete before we clear
         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
         * might observe the false value but not the entire
-        * critical section:
+        * critical section.
+        * It pairs with the smp_load_acquire() in nf_conntrack_lock()
         */
        smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);

