From: Sergey Matyukevich <sergey.matyukev...@synopsys.com>

All the llock/scond based atomic operations read and write the atomic
counter field. However, the write performed by the scond instruction is
not properly communicated to the compiler: the inline assembly passes
the atomic argument only as an input operand and its clobber list does
not include "memory". As a result, the compiler is free to optimize
accesses to the atomic argument, e.g. to keep reusing a value cached in
a register across the asm statement. This issue can be observed with
the following simple test functions (a sketch of the transformation the
compiler is allowed to make follows them):

static void test_atomic_simple(void)
{
        int v0 = 0xfaceabab;
        atomic_t v;
        int r;

        atomic_set(&v, v0);
        r = v0;

        atomic_inc(&v);
        r += 1;
        BUG_ON(v.counter != r);
}

static void test_atomic_simple64(void)
{
        long long v0 = 0xfaceabadf00df001LL;
        atomic64_t v;
        long long r;

        atomic64_set(&v, v0);
        r = v0;

        atomic64_inc(&v);
        r += 1LL;
        BUG_ON(v.counter != r);
}
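
With the original constraints, atomic_inc() looks like a pure read of
the counter to the compiler: the counter address is only an input
operand and there is no "memory" clobber. A minimal sketch of a
transformation the compiler is then allowed to make (hypothetical,
for illustration only; not actual generated code):

static void test_atomic_simple_as_optimized(void)
{
        int v0 = 0xfaceabab;
        atomic_t v;
        int r;

        atomic_set(&v, v0);     /* volatile store of v0 */
        r = v0;

        atomic_inc(&v);         /* hardware updates v.counter, but the
                                   write is invisible to the compiler */
        r += 1;
        BUG_ON(v0 != r);        /* stale value substituted for v.counter,
                                   so this folds to BUG_ON(true) */
}

Since no write to v.counter is visible between atomic_set() and the
BUG_ON(), the compiler may substitute the last known value, and the
check fires even though the hardware did increment the counter.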

This commit fixes the unwanted optimizations by informing the compiler
about the write operations: the atomic argument is moved to the output
operand list. Besides, "memory" is added to the clobber list, except
where a comment explicitly explains why it is not needed. Finally, the
appropriate memory constraint 'ATO' is used for the atomic operands.
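
For reference, a sketch of how the patched ATOMIC_OP() macro below
expands for the add operation (approximate expansion, shown here only
to illustrate the new constraints): the counter is now a read-write
"+ATO" memory operand and "memory" is in the clobber list, so the
compiler must assume the asm updates it:

static inline void arch_atomic_add(int i, atomic_t *v)
{
        unsigned int val;

        __asm__ __volatile__(
        "1:     llock   %[val], %[ctr]                  \n"
        "       add     %[val], %[val], %[i]            \n"
        "       scond   %[val], %[ctr]                  \n"
        "       bnz     1b                              \n"
        : [val] "=&r"   (val), /* Early clobber to prevent reg reuse */
          [ctr] "+ATO"  (v->counter) /* read-write memory operand */
        : [i]   "ir"    (i)
        : "cc", "memory");
}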

Signed-off-by: Sergey Matyukevich <sergey.matyukev...@synopsys.com>
---
 arch/arc/include/asm/atomic-llsc.h    | 36 ++++++++---------
 arch/arc/include/asm/atomic64-arcv2.h | 58 +++++++++++++--------------
 2 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 1b0ffaeee16d..b6cbc3ea1f52 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -11,14 +11,14 @@ static inline void arch_atomic_##op(int i, atomic_t *v)	\
        unsigned int val;                                               \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     llock   %[val], [%[ctr]]                \n"             \
+       "1:     llock   %[val], %[ctr]                  \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
-       "       scond   %[val], [%[ctr]]                \n"             \
+       "       scond   %[val], %[ctr]                  \n"             \
        "       bnz     1b                              \n"             \
-       : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
-       : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg 
direct addr mode */  \
-         [i]   "ir"    (i)                                             \
-       : "cc");                                                        \
+       : [val] "=&r"   (val), /* Early clobber to prevent reg reuse */ \
+         [ctr] "+ATO" (v->counter)                             \
+       : [i]   "ir"    (i)                                             \
+       : "cc", "memory");                                              \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, asm_op)                           \
@@ -27,14 +27,14 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
        unsigned int val;                                               \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     llock   %[val], [%[ctr]]                \n"             \
+       "1:     llock   %[val], %[ctr]                  \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
-       "       scond   %[val], [%[ctr]]                \n"             \
+       "       scond   %[val], %[ctr]                  \n"             \
        "       bnz     1b                              \n"             \
-       : [val] "=&r"   (val)                                           \
-       : [ctr] "r"     (&v->counter),                                  \
-         [i]   "ir"    (i)                                             \
-       : "cc");                                                        \
+       : [val] "=&r"   (val),                                          \
+         [ctr] "+ATO" (v->counter)                             \
+       : [i]   "ir"    (i)                                             \
+       : "cc", "memory");                                              \
                                                                        \
        return val;                                                     \
 }
@@ -48,15 +48,15 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
        unsigned int val, orig;                                         \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     llock   %[orig], [%[ctr]]               \n"             \
+       "1:     llock   %[orig], %[ctr]                 \n"             \
        "       " #asm_op " %[val], %[orig], %[i]       \n"             \
-       "       scond   %[val], [%[ctr]]                \n"             \
+       "       scond   %[val], %[ctr]                  \n"             \
        "       bnz     1b                              \n"             \
        : [val] "=&r"   (val),                                          \
-         [orig] "=&r" (orig)                                           \
-       : [ctr] "r"     (&v->counter),                                  \
-         [i]   "ir"    (i)                                             \
-       : "cc");                                                        \
+         [orig] "=&r" (orig),                                          \
+         [ctr] "+ATO" (v->counter)                             \
+       : [i]   "ir"    (i)                                             \
+       : "cc", "memory");                                              \
                                                                        \
        return orig;                                                    \
 }
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index c5a8010fdc97..ea9b5d41b645 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -53,14 +53,14 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v)	\
                                                                        \
        __asm__ __volatile__(                                           \
        "1:                             \n"                             \
-       "       llockd  %0, [%1]        \n"                             \
+       "       llockd  %0, %1          \n"                             \
        "       " #op1 " %L0, %L0, %L2  \n"                             \
        "       " #op2 " %H0, %H0, %H2  \n"                             \
-       "       scondd   %0, [%1]       \n"                             \
+       "       scondd   %0, %1         \n"                             \
        "       bnz     1b              \n"                             \
-       : "=&r"(val)                                                    \
-       : "r"(&v->counter), "ir"(a)                                     \
-       : "cc");                                                        \
+       : "=&r"(val), "+ATO"(v->counter)                                        
\
+       : "ir"(a)                                                       \
+       : "cc", "memory");                                              \
 }                                                                      \
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)                               \
@@ -70,13 +70,13 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:                             \n"                             \
-       "       llockd   %0, [%1]       \n"                             \
+       "       llockd   %0, %1         \n"                             \
        "       " #op1 " %L0, %L0, %L2  \n"                             \
        "       " #op2 " %H0, %H0, %H2  \n"                             \
-       "       scondd   %0, [%1]       \n"                             \
+       "       scondd   %0, %1         \n"                             \
        "       bnz     1b              \n"                             \
-       : [val] "=&r"(val)                                              \
-       : "r"(&v->counter), "ir"(a)                                     \
+       : "=&r"(val), "+ATO"(v->counter)                                        
\
+       : "ir"(a)                                                       \
        : "cc");        /* memory clobber comes from smp_mb() */        \
                                                                        \
        return val;                                                     \
@@ -92,13 +92,13 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:                             \n"                             \
-       "       llockd   %0, [%2]       \n"                             \
+       "       llockd   %0, %2         \n"                             \
        "       " #op1 " %L1, %L0, %L3  \n"                             \
        "       " #op2 " %H1, %H0, %H3  \n"                             \
-       "       scondd   %1, [%2]       \n"                             \
+       "       scondd   %1, %2         \n"                             \
        "       bnz     1b              \n"                             \
-       : "=&r"(orig), "=&r"(val)                                       \
-       : "r"(&v->counter), "ir"(a)                                     \
+       : "=&r"(orig), "=&r"(val), "+ATO"(v->counter)                   \
+       : "ir"(a)                                                       \
        : "cc");        /* memory clobber comes from smp_mb() */        \
                                                                        \
        return orig;                                                    \
@@ -145,14 +145,14 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
        smp_mb();
 
        __asm__ __volatile__(
-       "1:     llockd  %0, [%1]        \n"
+       "1:     llockd  %0, %1          \n"
        "       brne    %L0, %L2, 2f    \n"
        "       brne    %H0, %H2, 2f    \n"
-       "       scondd  %3, [%1]        \n"
+       "       scondd  %3, %1          \n"
        "       bnz     1b              \n"
        "2:                             \n"
-       : "=&r"(prev)
-       : "r"(ptr), "ir"(expected), "r"(new)
+       : "=&r"(prev), "+ATO"(*ptr)
+       : "ir"(expected), "r"(new)
        : "cc");        /* memory clobber comes from smp_mb() */
 
        smp_mb();
@@ -167,12 +167,12 @@ static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
        smp_mb();
 
        __asm__ __volatile__(
-       "1:     llockd  %0, [%1]        \n"
-       "       scondd  %2, [%1]        \n"
+       "1:     llockd  %0, %1          \n"
+       "       scondd  %2, %1          \n"
        "       bnz     1b              \n"
        "2:                             \n"
-       : "=&r"(prev)
-       : "r"(ptr), "r"(new)
+       : "=&r"(prev), "+ATO"(*ptr)
+       : "r"(new)
        : "cc");        /* memory clobber comes from smp_mb() */
 
        smp_mb();
@@ -195,15 +195,15 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
        smp_mb();
 
        __asm__ __volatile__(
-       "1:     llockd  %0, [%1]        \n"
+       "1:     llockd  %0, %1          \n"
        "       sub.f   %L0, %L0, 1     # w0 - 1, set C on borrow\n"
        "       sub.c   %H0, %H0, 1     # if C set, w1 - 1\n"
        "       brlt    %H0, 0, 2f      \n"
-       "       scondd  %0, [%1]        \n"
+       "       scondd  %0, %1          \n"
        "       bnz     1b              \n"
        "2:                             \n"
-       : "=&r"(val)
-       : "r"(&v->counter)
+       : "=&r"(val), "+ATO"(v->counter)
+       :
        : "cc");        /* memory clobber comes from smp_mb() */
 
        smp_mb();
@@ -228,17 +228,17 @@ static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
        smp_mb();
 
        __asm__ __volatile__(
-       "1:     llockd  %0, [%2]        \n"
+       "1:     llockd  %0, %2          \n"
        "       brne    %L0, %L4, 2f    # continue to add since v != u \n"
        "       breq.d  %H0, %H4, 3f    # return since v == u \n"
        "2:                             \n"
        "       add.f   %L1, %L0, %L3   \n"
        "       adc     %H1, %H0, %H3   \n"
-       "       scondd  %1, [%2]        \n"
+       "       scondd  %1, %2          \n"
        "       bnz     1b              \n"
        "3:                             \n"
-       : "=&r"(old), "=&r" (temp)
-       : "r"(&v->counter), "r"(a), "r"(u)
+       : "=&r"(old), "=&r" (temp), "+ATO"(v->counter)
+       : "r"(a), "r"(u)
        : "cc");        /* memory clobber comes from smp_mb() */
 
        smp_mb();
-- 
2.25.1

