Use ll/sc loops instead of C loops around cmpxchg.
Update the atomic64_add_unless block comment to match the code.
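
On alpha, each atomic_cmpxchg() in the old loops is itself an ll/sc
(load-locked / store-conditional) sequence, barriers included, and the
surrounding C loop adds a separate compare and re-read on every retry.
Open-coding a single ll/sc loop does the compare inside the locked
sequence, brackets the whole operation with one smp_mb() pair, and
moves the store-conditional retry branch out of line via .subsection 2
so the expected path falls straight through.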

Signed-off-by: Richard Henderson <r...@twiddle.net>
---
 arch/alpha/include/asm/atomic.h | 60 +++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 23 deletions(-)
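
A note for reviewers, outside the commit log: both functions keep the
generic calling convention, where __atomic_add_unless() returns the old
value and atomic64_add_unless() returns nonzero iff the add happened.
A minimal sketch of a typical caller follows; the struct obj and
obj_get_unless_dead() names are hypothetical, while the wrappers are
the stock ones from <linux/atomic.h>:

	#include <linux/atomic.h>

	struct obj {
		atomic_t refcount;
		/* ... */
	};

	/*
	 * Take a reference only while the object is still live.
	 * atomic_inc_not_zero() expands to atomic_add_unless(v, 1, 0)
	 * and returns nonzero iff the increment actually happened,
	 * i.e. iff the old count was not zero.
	 */
	static inline int obj_get_unless_dead(struct obj *o)
	{
		return atomic_inc_not_zero(&o->refcount);
	}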

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4f..0dc18fc 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-       int c, old;
-       c = atomic_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c;
+       int c, new, old;
+       smp_mb();
+       __asm__ __volatile__(
+       "1:     ldl_l   %[old],%[mem]\n"
+       "       cmpeq   %[old],%[u],%[c]\n"
+       "       addl    %[old],%[a],%[new]\n"
+       "       bne     %[c],2f\n"
+       "       stl_c   %[new],%[mem]\n"
+       "       beq     %[new],3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br      1b\n"
+       ".previous"
+       : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
+       : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
+       : "memory");
+       smp_mb();
+       return old;
 }
 
 
@@ -207,21 +214,28 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-       long c, old;
-       c = atomic64_read(v);
-       for (;;) {
-               if (unlikely(c == (u)))
-                       break;
-               old = atomic64_cmpxchg((v), c, c + (a));
-               if (likely(old == c))
-                       break;
-               c = old;
-       }
-       return c != (u);
+       long c, tmp;
+       smp_mb();
+       __asm__ __volatile__(
+       "1:     ldq_l   %[tmp],%[mem]\n"
+       "       cmpeq   %[tmp],%[u],%[c]\n"
+       "       addq    %[tmp],%[a],%[tmp]\n"
+       "       bne     %[c],2f\n"
+       "       stq_c   %[tmp],%[mem]\n"
+       "       beq     %[tmp],3f\n"
+       "2:\n"
+       ".subsection 2\n"
+       "3:     br      1b\n"
+       ".previous"
+       : [tmp] "=&r"(tmp), [c] "=&r"(c)
+       : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
+       : "memory");
+       smp_mb();
+       return !c;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-- 
1.8.1.4
