On Wed, Jan 18, 2017 at 10:17:32PM +0900, Byungchul Park wrote:

> +struct cross_lock {
> +     /*
> +      * When more than one acquisition of crosslocks are overlapped,
> +      * we do actual commit only when ref == 0.
> +      */
> +     atomic_t ref;

That comment doesn't seem right; should that be: ref != 0?

Also, would it not be much clearer to call this nr_blocked, or waiters,
or something along those lines? Because that is what it appears to be.
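
Something like this, perhaps (just a sketch with the rename; the
comment then also matches what the code does):

	struct cross_lock {
		/*
		 * Number of overlapping acquisitions still pending;
		 * each acquire increments this, each release
		 * decrements it, and the commit only does anything
		 * while it is non-zero.
		 */
		atomic_t		nr_blocked;

		struct held_lock	hlock;
	};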

> +     /*
> +      * Separate hlock instance. This will be used at the commit step.
> +      *
> +      * TODO: Use a smaller data structure containing only necessary
> +      * data. However, we should make lockdep code able to handle the
> +      * smaller one first.
> +      */
> +     struct held_lock        hlock;
> +};

> +static int add_xlock(struct held_lock *hlock)
> +{
> +     struct cross_lock *xlock;
> +     unsigned int gen_id;
> +
> +     if (!depend_after(hlock))
> +             return 1;
> +
> +     if (!graph_lock())
> +             return 0;
> +
> +     xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
> +
> +     /*
> +      * When acquisitions for a xlock are overlapped, we use
> +      * a reference counter to handle it.

Handle what!? That comment is nearly empty.

> +      */
> +     if (atomic_inc_return(&xlock->ref) > 1)
> +             goto unlock;
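
Perhaps spell out what is actually being handled; something like:

	/*
	 * An earlier acquisition of this crosslock is still pending;
	 * keep its gen_id and hlock so the eventual commit covers
	 * everything since the first acquire, and only count the
	 * overlap.
	 */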

So you set the xlock's generation only once, for the oldest blocking-on
relation, which makes sense: you want to be able to relate it to all
the historical locks since then.
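
If I read add_xlock() right, the effect is something like:

	T1: acquire X    ref 0->1, gen_id = G, hlock recorded
	T2: acquire X    ref 1->2, gen_id stays G
	T3: release X    ref 2->1
	    commit       presumably walks xhlocks back to gen_id G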

> +
> +     gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
> +     xlock->hlock = *hlock;
> +     xlock->hlock.gen_id = gen_id;
> +unlock:
> +     graph_unlock();
> +     return 1;
> +}

> +void lock_commit_crosslock(struct lockdep_map *lock)
> +{
> +     struct cross_lock *xlock;
> +     unsigned long flags;
> +
> +     if (!current->xhlocks)
> +             return;
> +
> +     if (unlikely(current->lockdep_recursion))
> +             return;
> +
> +     raw_local_irq_save(flags);
> +     check_flags(flags);
> +     current->lockdep_recursion = 1;
> +
> +     if (unlikely(!debug_locks))
> +             return;
> +
> +     if (!graph_lock())
> +             return;
> +
> +     xlock = &((struct lockdep_map_cross *)lock)->xlock;
> +     if (atomic_read(&xlock->ref) > 0 && !commit_xhlocks(xlock))

You terminate with graph_lock() held.

Also, I think you can do the atomic_read() outside of graph_lock(), to
avoid taking graph_lock when it's 0.
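
That is, something like the below (an untested sketch; I'm assuming
commit_xhlocks() drops graph_lock itself on its failure path, like
the other graph operations do, otherwise that branch needs a
graph_unlock() as well):

	xlock = &((struct lockdep_map_cross *)lock)->xlock;

	/*
	 * Nothing pending means nothing to commit; don't take
	 * graph_lock at all in that case.
	 */
	if (atomic_read(&xlock->ref) > 0) {
		if (!graph_lock())
			goto out;

		if (commit_xhlocks(xlock))
			graph_unlock();
	}
out:
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);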

> +             return;
> +
> +     graph_unlock();
> +     current->lockdep_recursion = 0;
> +     raw_local_irq_restore(flags);
> +}
> +EXPORT_SYMBOL_GPL(lock_commit_crosslock);
> +
> +/*
> + * return 0: Need to do the normal release operation.
> + * return 1: Done. No more release operations are needed.
> + */
> +static int lock_release_crosslock(struct lockdep_map *lock)
> +{
> +     if (cross_lock(lock)) {
> +             atomic_dec(&((struct lockdep_map_cross *)lock)->xlock.ref);
> +             return 1;
> +     }
> +     return 0;
> +}
> +
> +static void cross_init(struct lockdep_map *lock, int cross)
> +{
> +     if (cross)
> +             atomic_set(&((struct lockdep_map_cross *)lock)->xlock.ref, 0);
> +
> +     lock->cross = cross;
> +}
