On Fri, Feb 07, 2014 at 05:58:01PM +0100, Torsten Duwe wrote:
> +static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
>  {
> +     register struct __raw_tickets old, tmp,
> +             inc = { .tail = TICKET_LOCK_INC };
> +
>       CLEAR_IO_SYNC;
> +     __asm__ __volatile__(
> +"1:  lwarx   %0,0,%4         # arch_spin_lock\n"
> +"    add     %1,%3,%0\n"
> +     PPC405_ERR77(0, "%4")
> +"    stwcx.  %1,0,%4\n"
> +"    bne-    1b"
> +     : "=&r" (old), "=&r" (tmp), "+m" (lock->tickets)
> +     : "r" (inc), "r" (&lock->tickets)
> +     : "cc");
> +
> +     if (likely(old.head == old.tail))
> +             goto out;

I would have expected an lwsync someplace hereabouts.

> +     for (;;) {
> +             unsigned count = 100;
> +
>               do {
> +                     if (ACCESS_ONCE(lock->tickets.head) == old.tail)
> +                             goto out;
>                       HMT_low();
>                       if (SHARED_PROCESSOR)
>                               __spin_yield(lock);
> +             } while (--count);
>               HMT_medium();
>       }
> +out:
> +#if defined(CONFIG_PPC_SPLPAR)
> +     lock->holder = LOCK_TOKEN;
> +#endif
> +     barrier();      /* make sure nothing creeps before the lock is taken */
>  }
>  
>  static inline

> @@ -147,10 +220,21 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
>  
>  static inline void arch_spin_unlock(arch_spinlock_t *lock)
>  {
> +     arch_spinlock_t old, new;
> +
> +#if defined(CONFIG_PPC_SPLPAR)
> +     lock->holder = 0;
> +#endif
> +     do {
> +             old.tickets = ACCESS_ONCE(lock->tickets);
> +             new.tickets.head = old.tickets.head + TICKET_LOCK_INC;
> +             new.tickets.tail = old.tickets.tail;
> +     } while (unlikely(__arch_spin_cmpxchg_eq(lock,
> +                                              old.head_tail,
> +                                              new.head_tail)));
>       SYNC_IO;
>       __asm__ __volatile__("# arch_spin_unlock\n\t"
>                               PPC_RELEASE_BARRIER: : :"memory");

Doesn't your cmpxchg_eq already imply an lwsync?

> -     lock->slock = 0;
>  }

I'm still failing to see why you need an ll/sc pair for unlock.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to