On Fri, Jan 22, 2021 at 08:17:01PM -0500, Joel Fernandes (Google) wrote:
> +static inline void __sched_core_erase_cookie(struct sched_core_cookie *cookie)
> +{
> +     lockdep_assert_held(&sched_core_cookies_lock);
> +
> +     /* Already removed */
> +     if (RB_EMPTY_NODE(&cookie->node))
> +             return;
> +
> +     rb_erase(&cookie->node, &sched_core_cookies);
> +     RB_CLEAR_NODE(&cookie->node);
> +}
> +
> +/* Called when a task no longer points to the cookie in question */
> +static void sched_core_put_cookie(struct sched_core_cookie *cookie)
> +{
> +     unsigned long flags;
> +
> +     if (!cookie)
> +             return;
> +
> +     if (refcount_dec_and_test(&cookie->refcnt)) {
> +             raw_spin_lock_irqsave(&sched_core_cookies_lock, flags);
> +             __sched_core_erase_cookie(cookie);
> +             raw_spin_unlock_irqrestore(&sched_core_cookies_lock, flags);
> +             kfree(cookie);
> +     }
> +}

> +static void __sched_core_update_cookie(struct task_struct *p)
> +{

> +     raw_spin_lock(&sched_core_cookies_lock);

> +             /*
> +              * Cookie exists, increment refcnt. If refcnt is currently 0,
> +              * we're racing with a put() (refcnt decremented but cookie not
> +              * yet removed from the tree). In this case, we can simply
> +              * perform the removal ourselves and retry.
> +              * sched_core_put_cookie() will still function correctly.
> +              */
> +             if (unlikely(!refcount_inc_not_zero(&match->refcnt))) {
> +                     __sched_core_erase_cookie(match);
> +                     goto retry;
> +             }

refcount_dec_and_lock() — or refcount_dec_and_lock_irqsave(), since the put path
runs with interrupts disabled — avoids that complication: the final decrement and
the tree erase then happen atomically under the lock, so update() can never
observe a cookie whose refcount already dropped to zero but which is still in
the tree.

Reply via email to