Hi Peter, On Fri, Apr 22, 2016 at 11:04:27AM +0200, Peter Zijlstra wrote: > Implement FETCH-OP atomic primitives, these are very similar to the > existing OP-RETURN primitives we already have, except they return the > value of the atomic variable _before_ modification. > > This is especially useful for irreversible operations -- such as > bitops (because it becomes impossible to reconstruct the state prior > to modification). > > Signed-off-by: Peter Zijlstra (Intel) <[email protected]> > --- > arch/metag/include/asm/atomic.h | 2 + > arch/metag/include/asm/atomic_lnkget.h | 36 > +++++++++++++++++++++++++++++---- > arch/metag/include/asm/atomic_lock1.h | 33 ++++++++++++++++++++++++++---- > 3 files changed, 63 insertions(+), 8 deletions(-) > > --- a/arch/metag/include/asm/atomic.h > +++ b/arch/metag/include/asm/atomic.h > @@ -17,6 +17,8 @@ > #include <asm/atomic_lnkget.h> > #endif > > +#define atomic_fetch_or atomic_fetch_or > + > #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) > > #define atomic_dec_return(v) atomic_sub_return(1, (v)) > --- a/arch/metag/include/asm/atomic_lnkget.h > +++ b/arch/metag/include/asm/atomic_lnkget.h > @@ -69,16 +69,44 @@ static inline int atomic_##op##_return(i > return result; \ > } > > -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) > +#define ATOMIC_FETCH_OP(op) \ > +static inline int atomic_fetch_##op(int i, atomic_t *v) > \ > +{ \ > + int result, temp; \ > + \ > + smp_mb(); \ > + \ > + asm volatile ( \ > + "1: LNKGETD %1, [%2]\n" \ > + " " #op " %0, %1, %3\n" \
I was hoping never to have to think about Meta asm constraints again :-P
AND/OR/XOR are only available in the data units, as determined by %1 in
this case, so the constraint for result shouldn't have "a" in it.
diff --git a/arch/metag/include/asm/atomic_lnkget.h
b/arch/metag/include/asm/atomic_lnkget.h
index 50ad05050947..def2c642f053 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)
\
" ANDT %0, %0, #HI(0x3f000000)\n" \
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
- : "=&d" (temp), "=&da" (result) \
+ : "=&d" (temp), "=&d" (result) \
: "da" (&v->counter), "bd" (i) \
: "cc"); \
That also ensures the "bd" constraint for %3 (meaning "an op2 register
where op1 [%1 in this case] is a data unit register and the instruction
supports O2R") is consistent.
So with that change this patch looks good to me:
Acked-by: James Hogan <[email protected]>
Note that for the ATOMIC_OP_RETURN() case (add/sub only) either address
or data units can be used (hence the "da" for %1), but then the "bd"
constraint on %3 is wrong as op1 [%1] may not be in a data unit (sorry I
didn't spot that at the time). I'll queue a fix, something like below
probably ("br" means "An Op2 register and the instruction supports O2R",
i.e. op1/%1 doesn't have to be a data unit register):
diff --git a/arch/metag/include/asm/atomic_lnkget.h
b/arch/metag/include/asm/atomic_lnkget.h
index 50ad05050947..def2c642f053 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -61,7 +61,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)
\
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
: "=&d" (temp), "=&da" (result) \
- : "da" (&v->counter), "bd" (i) \
+ : "da" (&v->counter), "br" (i) \
: "cc"); \
\
smp_mb(); \
\
\
Thanks
James
> + " LNKSETD [%2], %0\n" \
> + " DEFR %0, TXSTAT\n" \
> + " ANDT %0, %0, #HI(0x3f000000)\n" \
> + " CMPT %0, #HI(0x02000000)\n" \
> + " BNZ 1b\n" \
> + : "=&d" (temp), "=&da" (result) \
> + : "da" (&v->counter), "bd" (i) \
> + : "cc"); \
> + \
> + smp_mb(); \
> + \
> + return result; \
> +}
> +
> +#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
>
> ATOMIC_OPS(add)
> ATOMIC_OPS(sub)
>
> -ATOMIC_OP(and)
> -ATOMIC_OP(or)
> -ATOMIC_OP(xor)
> +#undef ATOMIC_OPS
> +#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
> +
> +ATOMIC_OPS(and)
> +ATOMIC_OPS(or)
> +ATOMIC_OPS(xor)
>
> #undef ATOMIC_OPS
> +#undef ATOMIC_FETCH_OP
> #undef ATOMIC_OP_RETURN
> #undef ATOMIC_OP
>
> --- a/arch/metag/include/asm/atomic_lock1.h
> +++ b/arch/metag/include/asm/atomic_lock1.h
> @@ -64,15 +64,40 @@ static inline int atomic_##op##_return(i
> return result; \
> }
>
> -#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
> +#define ATOMIC_FETCH_OP(op, c_op) \
> +static inline int atomic_fetch_##op(int i, atomic_t *v)
> \
> +{ \
> + unsigned long result; \
> + unsigned long flags; \
> + \
> + __global_lock1(flags); \
> + result = v->counter; \
> + fence(); \
> + v->counter c_op i; \
> + __global_unlock1(flags); \
> + \
> + return result; \
> +}
> +
> +#define ATOMIC_OPS(op, c_op) \
> + ATOMIC_OP(op, c_op) \
> + ATOMIC_OP_RETURN(op, c_op) \
> + ATOMIC_FETCH_OP(op, c_op)
>
> ATOMIC_OPS(add, +=)
> ATOMIC_OPS(sub, -=)
> -ATOMIC_OP(and, &=)
> -ATOMIC_OP(or, |=)
> -ATOMIC_OP(xor, ^=)
>
> #undef ATOMIC_OPS
> +#define ATOMIC_OPS(op, c_op) \
> + ATOMIC_OP(op, c_op) \
> + ATOMIC_FETCH_OP(op, c_op)
> +
> +ATOMIC_OPS(and, &=)
> +ATOMIC_OPS(or, |=)
> +ATOMIC_OPS(xor, ^=)
> +
> +#undef ATOMIC_OPS
> +#undef ATOMIC_FETCH_OP
> #undef ATOMIC_OP_RETURN
> #undef ATOMIC_OP
>
>
>
signature.asc
Description: Digital signature

