On Tue, Nov 24, 2020 at 01:43:54PM +0000, guo...@kernel.org wrote:
> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> index 59dd7be..6f5f438 100644
> --- a/arch/riscv/include/asm/Kbuild
> +++ b/arch/riscv/include/asm/Kbuild
> @@ -6,3 +6,6 @@ generic-y += kvm_para.h
>  generic-y += local64.h
>  generic-y += user.h
>  generic-y += vmlinux.lds.h
> +generic-y += mcs_spinlock.h
> +generic-y += qrwlock.h
> +generic-y += qspinlock.h
> diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
> index 5609185..e178700 100644
> --- a/arch/riscv/include/asm/cmpxchg.h
> +++ b/arch/riscv/include/asm/cmpxchg.h
> @@ -16,7 +16,43 @@
>       __typeof__(ptr) __ptr = (ptr);                                  \
>       __typeof__(new) __new = (new);                                  \
>       __typeof__(*(ptr)) __ret;                                       \
> +     register unsigned long __rc, tmp, align, addr;                  \
>       switch (size) {                                                 \
> +     case 2:                                                         \
> +             align = ((unsigned long) __ptr & 0x3);                  \
> +             addr = ((unsigned long) __ptr & ~0x3);                  \
> +             if (align) {                                            \
> +             __asm__ __volatile__ (                                  \
> +                     "0:     lr.w %0, 0(%z4)\n"                      \
> +                     "       move %1, %0\n"                          \
> +                     "       slli %1, %1, 16\n"                      \
> +                     "       srli %1, %1, 16\n"                      \
> +                     "       move %2, %z3\n"                         \
> +                     "       slli %2, %2, 16\n"                      \
> +                     "       or   %1, %2, %1\n"                      \
> +                     "       sc.w %2, %1, 0(%z4)\n"                  \
> +                     "       bnez %2, 0b\n"                          \
> +                     "       srli %0, %0, 16\n"                      \
> +                     : "=&r" (__ret), "=&r" (tmp), "=&r" (__rc)      \
> +                     : "rJ" (__new), "rJ"(addr)                      \
> +                     : "memory");                                    \
> +             } else {                                                \
> +             __asm__ __volatile__ (                                  \
> +                     "0:     lr.w %0, (%z4)\n"                       \
> +                     "       move %1, %0\n"                          \
> +                     "       srli %1, %1, 16\n"                      \
> +                     "       slli %1, %1, 16\n"                      \
> +                     "       move %2, %z3\n"                         \
> +                     "       or   %1, %2, %1\n"                      \
> +                     "       sc.w %2, %1, 0(%z4)\n"                  \
> +                     "       bnez %2, 0b\n"                          \
> +                     "       slli %0, %0, 16\n"                      \
> +                     "       srli %0, %0, 16\n"                      \
> +                     : "=&r" (__ret), "=&r" (tmp), "=&r" (__rc)      \
> +                     : "rJ" (__new), "rJ"(addr)                      \
> +                     : "memory");                                    \
> +             }                                                       \
> +             break;                                                  \
>       case 4:                                                         \
>               __asm__ __volatile__ (                                  \
>                       "       amoswap.w %0, %2, %1\n"                 \

I'm pretty sure there's a handful of implementations like this out
there... if only we could share.

Anyway, this too should be an independent patch.

Reply via email to