4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Eric W. Biederman <[email protected]>

commit 040757f738e13caaa9c5078bca79aa97e11dde88 upstream.

Always increment/decrement ucount->count under the ucounts_lock.  The
increments are there already, and moving the decrements there makes the
locking logic of the code simpler.  This simplification in the locking
logic fixes a race between put_ucounts and get_ucounts that could
result in a use-after-free: the count could drop to zero, after which
the entry could be found by get_ucounts and then freed by put_ucounts.
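
To make the race concrete, here is a rough interleaving under the
pre-patch code, using the function names from the diff below (the
steps are simplified):

    CPU 0: put_ucounts()                 CPU 1: get_ucounts()
    --------------------                 --------------------
    atomic_dec_and_test(&ucounts->count)
      -> count drops to 0
                                         spin_lock_irq(&ucounts_lock)
                                         find_ucounts() still finds the
                                           entry (not yet unhashed)
                                         atomic_add_unless() -> count = 1
                                         spin_unlock_irq(&ucounts_lock)
    spin_lock_irqsave(&ucounts_lock, ..)
    hlist_del_init(&ucounts->node)
    spin_unlock_irqrestore(...)
    kfree(ucounts)
                                         CPU 1 now dereferences freed
                                         memory: use-after-free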

A bug, presumably this one, was found by a combination of syzkaller and
KASAN.  JongHwan Kim reported the syzkaller failure and Dmitry Vyukov
spotted the race in the code.

Fixes: f6b2db1a3e8d ("userns: Make the count of user namespaces per user")
Reported-by: JongHwan Kim <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Andrei Vagin <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 include/linux/user_namespace.h |    2 +-
 kernel/ucount.c                |   18 +++++++++++-------
 2 files changed, 12 insertions(+), 8 deletions(-)

--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -65,7 +65,7 @@ struct ucounts {
        struct hlist_node node;
        struct user_namespace *ns;
        kuid_t uid;
-       atomic_t count;
+       int count;
        atomic_t ucount[UCOUNT_COUNTS];
 };
 
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -139,7 +139,7 @@ static struct ucounts *get_ucounts(struc
 
                new->ns = ns;
                new->uid = uid;
-               atomic_set(&new->count, 0);
+               new->count = 0;
 
                spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
@@ -150,8 +150,10 @@ static struct ucounts *get_ucounts(struc
                        ucounts = new;
                }
        }
-       if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+       if (ucounts->count == INT_MAX)
                ucounts = NULL;
+       else
+               ucounts->count += 1;
        spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
@@ -160,13 +162,15 @@ static void put_ucounts(struct ucounts *
 {
        unsigned long flags;
 
-       if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock_irqsave(&ucounts_lock, flags);
+       spin_lock_irqsave(&ucounts_lock, flags);
+       ucounts->count -= 1;
+       if (!ucounts->count)
                hlist_del_init(&ucounts->node);
-               spin_unlock_irqrestore(&ucounts_lock, flags);
+       else
+               ucounts = NULL;
+       spin_unlock_irqrestore(&ucounts_lock, flags);
 
-               kfree(ucounts);
-       }
+       kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
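
After the patch, every change to ucounts->count happens with
ucounts_lock held, so a zero count can never be observed by a
concurrent lookup.  A minimal userspace analogue of that discipline,
with a pthread mutex standing in for the spinlock and purely
illustrative names throughout, might look like this:

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>

/* Stand-ins for the kernel structures; nothing here is the
 * kernel's actual code. */
struct obj {
	int count;		/* protected by obj_lock */
};

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *the_obj;	/* stands in for the hash-table lookup */

/* Mirrors the fixed get_ucounts(): the saturation check and the
 * increment both happen under the lock. */
static struct obj *obj_get(void)
{
	struct obj *o;

	pthread_mutex_lock(&obj_lock);
	o = the_obj;
	if (o) {
		if (o->count == INT_MAX)
			o = NULL;
		else
			o->count += 1;
	}
	pthread_mutex_unlock(&obj_lock);
	return o;
}

/* Mirrors the fixed put_ucounts(): the decrement and the zero test
 * are under the lock, and the object is made unreachable before the
 * lock is dropped, so no one can re-find a zero-count object. */
static void obj_put(struct obj *o)
{
	pthread_mutex_lock(&obj_lock);
	o->count -= 1;
	if (!o->count)
		the_obj = NULL;	/* "unhash" */
	else
		o = NULL;	/* still referenced, nothing to free */
	pthread_mutex_unlock(&obj_lock);

	free(o);		/* free(NULL) is a no-op, like kfree(NULL) */
}

int main(void)
{
	the_obj = calloc(1, sizeof(*the_obj));
	the_obj->count = 1;

	struct obj *o = obj_get();	/* count: 1 -> 2 */
	obj_put(o);			/* count: 2 -> 1 */
	obj_put(the_obj);		/* count: 1 -> 0, freed */
	return 0;
}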

