On 09/28/2013 12:34 AM, Jason Low wrote:
Also, below is what the mcs_spin_lock() and mcs_spin_unlock()
functions would look like after applying the proposed changes.
static noinline
void mcs_spin_lock(struct mcs_spin_node **lock, struct mcs_spin_node *node)
{
	struct mcs_spin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired. No need to set node->locked since it
		   won't be used */
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
	smp_mb();
I wonder if a memory barrier is really needed here.
}
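For context, the intended calling pattern pairs each lock call with an unlock on the same node. A minimal sketch, assuming a tail pointer that starts out NULL; the caller and variable names below are hypothetical, not part of the patch:

static struct mcs_spin_node *example_lock;	/* NULL means unlocked */

static void example_critical_section(void)
{
	struct mcs_spin_node node;	/* per-acquisition queue node, lives on the stack */

	mcs_spin_lock(&example_lock, &node);
	/* ... critical section ... */
	mcs_spin_unlock(&example_lock, &node);
}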
static void mcs_spin_unlock(struct mcs_spin_node **lock, struct mcs_spin_node *node)
{
	struct mcs_spin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	smp_wmb();
	ACCESS_ONCE(next->locked) = 1;
}
Instead, I think what we need may be:
	if (likely(!next)) {
		....
	} else
		smp_mb();
	ACCESS_ONCE(next->locked) = 1;
That will ensure a memory barrier in the unlock path.
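For illustration, folding that change into the unlock path above would give something like the following. This is just a sketch, and it assumes the cmpxchg() on the early-return path already provides the required full barrier; the comments are mine:

static void mcs_spin_unlock(struct mcs_spin_node **lock, struct mcs_spin_node *node)
{
	struct mcs_spin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL; the cmpxchg()
		 * acts as a full memory barrier on this path.
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	} else
		smp_mb();	/* order the critical section before passing the lock */
	ACCESS_ONCE(next->locked) = 1;
}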
Regards,
Longman