On Fri, Mar 29, 2019 at 11:20:04AM -0400, Alex Kogan wrote:
> +static inline void pass_mcs_lock(struct mcs_spinlock *node,
> +				  struct mcs_spinlock *next)
> +{
> +	struct mcs_spinlock *succ = NULL;
> +
> +	succ = find_successor(node);
> +
> +	if (succ) {
> +		arch_mcs_spin_unlock_contended(&succ->locked, node->locked);
> +	} else if (node->locked > 1) {
> +		/*
> +		 * If the secondary queue is not empty, pass the lock
> +		 * to the first node in that queue.
> +		 */
> +		succ = MCS_NODE(node->locked);
> +		succ->tail->next = next;
> +		arch_mcs_spin_unlock_contended(&succ->locked, 1);
> +	} else {
> +		/*
> +		 * Otherwise, pass the lock to the immediate successor
> +		 * in the main queue.
> +		 */
> +		arch_mcs_spin_unlock_contended(&next->locked, 1);
> +	}
> +}
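For anyone following along: IIUC this overloads node->locked -- 1 is the
usual MCS hand-off value, and anything > 1 is an encoded pointer to the
head of the secondary queue.  Roughly (hypothetical sketch, not the exact
definitions from this series):

	/* decode a secondary-queue head previously stashed in ->locked */
	#define MCS_NODE(val)	((struct mcs_spinlock *)(val))

	/* normal hand-off to the next waiter in the main queue */
	arch_mcs_spin_unlock_contended(&next->locked, 1);

	/* hand-off that also forwards the secondary-queue head pointer */
	arch_mcs_spin_unlock_contended(&succ->locked, node->locked);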
Note that something like:

static inline void pass_mcs_lock(struct mcs_spinlock *node,
				 struct mcs_spinlock *next)
{
	struct mcs_spinlock *succ = NULL;
	uintptr_t *var = &next->locked;
	uintptr_t val = 1;

	succ = find_successor(node);

	if (succ) {
		var = &succ->locked;
		val = node->locked;
	} else if (node->locked > 1) {
		succ = MCS_NODE(node->locked);
		succ->tail->next = next; /* WRITE_ONCE() !?!? */
		var = &succ->locked;
	}

	arch_mcs_spin_unlock_contended(var, val);
}

is shorter and generates much better code if
arch_mcs_spin_unlock_contended() is asm volatile ().
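For reference, the generic helper is basically a store-release (modulo the
extra @val argument this series adds), something like:

	#define arch_mcs_spin_unlock_contended(l, val)			\
		smp_store_release((l), (val))

but an architecture can override it with inline asm, e.g. an arm64-style
64-bit store-release (hypothetical sketch, not an actual arch definition):

	#define arch_mcs_spin_unlock_contended(l, val)			\
		asm volatile("stlr %1, %0"				\
			     : "=Q" (*(u64 *)(l))			\
			     : "r" ((u64)(val)) : "memory")

With a single call site the compiler emits that asm block exactly once and
the branches only compute @var and @val, instead of duplicating the asm
(and its clobbers) in all three branches.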