Alexey Gladkov <gladkov.ale...@gmail.com> writes:

> The current implementation of the ucounts reference counter requires the
> use of spin_lock. We're going to use get_ucounts() in more
> performance-critical areas, such as the handling of RLIMIT_SIGPENDING.
>
> Now we need to use spin_lock only if we want to change the hashtable.
>
> v9:
> * Use a negative value to check that the ucounts->count is close to
>   overflow.
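
For anyone following along, the negative-value check relies on
atomic_add_negative() returning true when the post-increment value is
negative: once the counter is pushed past INT_MAX it wraps and stays
negative until the speculative increments are undone, so the sign bit
effectively acts as a saturation guard.  A minimal sketch of the bare
pattern (my paraphrase, not the patch code itself):

#include <linux/atomic.h>
#include <linux/types.h>

/* Try to take a reference; back off if the counter is saturated. */
static bool try_get_ref(atomic_t *count)
{
	/*
	 * True when the incremented value is negative, i.e. the counter
	 * just crossed INT_MAX and wrapped, or had already gone negative
	 * from earlier saturation.
	 */
	if (atomic_add_negative(1, count)) {
		atomic_dec(count);	/* undo the speculative increment */
		return false;
	}
	return true;
}

The undo path for ucounts itself needs a bit more care than a bare
atomic_dec(); see the note further down.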


Overall this looks good, one small issue below.

Eric

> diff --git a/kernel/ucount.c b/kernel/ucount.c
> index 50cc1dfb7d28..7bac19bb3f1e 100644
> --- a/kernel/ucount.c
> +++ b/kernel/ucount.c
> @@ -11,7 +11,7 @@
>  struct ucounts init_ucounts = {
>       .ns    = &init_user_ns,
>       .uid   = GLOBAL_ROOT_UID,
> -     .count = 1,
> +     .count = ATOMIC_INIT(1),
>  };
>  
>  #define UCOUNTS_HASHTABLE_BITS 10
> @@ -139,6 +139,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
>       spin_unlock_irq(&ucounts_lock);
>  }
>  
> +struct ucounts *get_ucounts(struct ucounts *ucounts)
> +{
> +     if (ucounts && atomic_add_negative(1, &ucounts->count)) {
> +             atomic_dec(&ucounts->count);
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^

To handle the pathological case of all of the other users calling
put_ucounts after the value goes negative, the above should be
put_ucounts instead of atomic_dec.
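
I.e. something along these lines (untested sketch; identical to the
patch except for the failure path):

struct ucounts *get_ucounts(struct ucounts *ucounts)
{
	if (ucounts && atomic_add_negative(1, &ucounts->count)) {
		/*
		 * put_ucounts() rather than atomic_dec(): if every other
		 * holder dropped its reference while the count was
		 * negative, this is the final put and the ucounts must be
		 * unhashed and freed here instead of being leaked.
		 */
		put_ucounts(ucounts);
		ucounts = NULL;
	}
	return ucounts;
}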


> +             ucounts = NULL;
> +     }
> +     return ucounts;
> +}
> +
>  struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
>  {
>       struct hlist_head *hashent = ucounts_hashentry(ns, uid);
> @@ -155,7 +164,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
>  
>               new->ns = ns;
>               new->uid = uid;
> -             new->count = 0;
> +             atomic_set(&new->count, 1);
>  
>               spin_lock_irq(&ucounts_lock);
>               ucounts = find_ucounts(ns, uid, hashent);
> @@ -163,33 +172,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
>                       kfree(new);
>               } else {
>                       hlist_add_head(&new->node, hashent);
> -                     ucounts = new;
> +                     spin_unlock_irq(&ucounts_lock);
> +                     return new;
>               }
>       }
> -     if (ucounts->count == INT_MAX)
> -             ucounts = NULL;
> -     else
> -             ucounts->count += 1;
>       spin_unlock_irq(&ucounts_lock);
> -     return ucounts;
> -}
> -
> -struct ucounts *get_ucounts(struct ucounts *ucounts)
> -{
> -     unsigned long flags;
> -
> -     if (!ucounts)
> -             return NULL;
> -
> -     spin_lock_irqsave(&ucounts_lock, flags);
> -     if (ucounts->count == INT_MAX) {
> -             WARN_ONCE(1, "ucounts: counter has reached its maximum value");
> -             ucounts = NULL;
> -     } else {
> -             ucounts->count += 1;
> -     }
> -     spin_unlock_irqrestore(&ucounts_lock, flags);
> -
> +     ucounts = get_ucounts(ucounts);
>       return ucounts;
>  }
>  
> @@ -197,15 +185,12 @@ void put_ucounts(struct ucounts *ucounts)
>  {
>       unsigned long flags;
>  
> -     spin_lock_irqsave(&ucounts_lock, flags);
> -     ucounts->count -= 1;
> -     if (!ucounts->count)
> +     if (atomic_dec_and_test(&ucounts->count)) {
> +             spin_lock_irqsave(&ucounts_lock, flags);
>               hlist_del_init(&ucounts->node);
> -     else
> -             ucounts = NULL;
> -     spin_unlock_irqrestore(&ucounts_lock, flags);
> -
> -     kfree(ucounts);
> +             spin_unlock_irqrestore(&ucounts_lock, flags);
> +             kfree(ucounts);
> +     }
>  }
>  
>  static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
