Module: xenomai-forge
Branch: master
Commit: cddc6fdef6144966be6ea0a266ecabe31fa93409
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=cddc6fdef6144966be6ea0a266ecabe31fa93409

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Mon Nov 14 20:48:15 2011 +0100

asm: refactor atomic.h

---

 include/asm-arm/atomic.h        |  473 +++------------------------------------
 include/asm-blackfin/atomic.h   |   47 +----
 include/asm-generic/Makefile.am |    1 +
 include/asm-generic/Makefile.in |    1 +
 include/asm-generic/atomic.h    |   85 +++++++
 include/asm-nios2/atomic.h      |   44 +----
 include/asm-powerpc/atomic.h    |  207 +-----------------
 include/asm-sh/atomic.h         |   76 +------
 include/asm-x86/atomic.h        |   98 +--------
 9 files changed, 136 insertions(+), 896 deletions(-)
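In short, the per-architecture kernel-space definitions of the xnarch_atomic_*
helpers collapse into the new include/asm-generic/atomic.h, built on the
kernel's atomic_long_* API; each arch header now includes the generic one last
and keeps only what is truly arch-specific. In user-space, the generic header
provides overridable fallbacks on top of the GCC __sync builtins. The following
stand-alone sketch (hypothetical names, not part of the patch) mirrors what the
user-space cmpxchg and barrier fallbacks expand to:

/* Minimal sketch of the generic user-space fallbacks; my_atomic_t and
 * my_cmpxchg are made-up names mimicking __xnarch_atomic_t and
 * xnarch_atomic_cmpxchg(). Build: gcc -O2 -o demo demo.c */
#include <stdio.h>

typedef struct { unsigned long counter; } my_atomic_t;

static unsigned long my_cmpxchg(my_atomic_t *v, unsigned long o, unsigned long n)
{
	/* same builtin the generic xnarch_atomic_cmpxchg fallback uses */
	return __sync_val_compare_and_swap(&v->counter, o, n);
}

int main(void)
{
	my_atomic_t v = { .counter = 42 };
	unsigned long ret;

	__sync_synchronize();	/* generic xnarch_memory_barrier() fallback */

	ret = my_cmpxchg(&v, 42, 7);	/* succeeds, old value matches */
	printf("cmpxchg(42 -> 7) returned %lu, counter now %lu\n", ret, v.counter);

	ret = my_cmpxchg(&v, 42, 9);	/* fails, returns current value 7 */
	printf("cmpxchg(42 -> 9) returned %lu, counter now %lu\n", ret, v.counter);
	return 0;
}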

diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 2a6c38e..b7b36a2 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -23,467 +23,50 @@
 #ifndef _XENO_ASM_ARM_ATOMIC_H
 #define _XENO_ASM_ARM_ATOMIC_H
 
-#include <asm/xenomai/features.h>
-
-typedef struct { unsigned long counter; } xnarch_atomic_t;
-typedef xnarch_atomic_t atomic_counter_t;
-
-#define xnarch_atomic_get(v)   (*(volatile unsigned long *)(&(v)->counter))
-static __inline__ void
-xnarch_atomic_set(xnarch_atomic_t *ptr, unsigned long val)
-{
-       ptr->counter = val;
-}
-
-extern void __xnarch_xchg_called_with_bad_pointer(void);
+#ifdef __KERNEL__
+#include <linux/version.h>
+#include <asm/irqflags.h>
 
-#define xnarch_read_memory_barrier()           xnarch_memory_barrier()
-#define xnarch_write_memory_barrier()          xnarch_memory_barrier()
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#error "Linux version at least 2.6.35 is required for correct atomic 
operations"
+#endif /* Linux < 2.6.35 */
 
 #if __LINUX_ARM_ARCH__ >= 6
-#ifndef CONFIG_SMP
-#define xnarch_memory_barrier() \
-       __asm__ __volatile__ ("": /* */ : /* */ :"memory")
-#else /* SMP */
-#if __LINUX_ARM_ARCH__ >= 7
-#define xnarch_memory_barrier()        \
-       __asm__ __volatile__ ("dmb" : /* */ : /* */ : "memory")
-#else /* __LINUX_ARM_ARCH == 6 */
-#define xnarch_memory_barrier()        \
-       __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5"      \
-                             : /* */ : "r" (0) : "memory")
-#endif /* __LINUX_ARM_ARCH == 6 */
-#endif /* CONFIG_SMP */
-
-#ifndef __KERNEL__
-#define cpu_relax()                            xnarch_memory_barrier()
-#endif /* __KERNEL__ */
-
-static inline unsigned long
-__xnarch_xchg(volatile void *ptr, unsigned long x, int size)
-{
-       unsigned long ret;
-       unsigned long tmp;
-
-       xnarch_memory_barrier();
-
-       switch (size) {
-       case 1:
-               asm volatile("@ __xchg1\n"
-               "1:     ldrexb  %0, [%4]\n"
-               "       strexb  %1, %3, [%4]\n"
-               "       teq     %1, #0\n"
-               "       bne     1b"
-                       : "=&r" (ret), "=&r" (tmp),
-                         "+Qo" (*(char *)ptr)
-                       : "r" (x), "r" (ptr)
-                       : "cc");
-               break;
-       case 4:
-               asm volatile("@ __xchg4\n"
-               "1:     ldrex   %0, [%4]\n"
-               "       strex   %1, %3, [%4]\n"
-               "       teq     %1, #0\n"
-               "       bne     1b"
-                       : "=&r" (ret), "=&r" (tmp),
-                         "+Qo" (*(unsigned *)ptr)
-                       : "r" (x), "r" (ptr)
-                       : "cc");
-               break;
-       default:
-               __xnarch_xchg_called_with_bad_pointer(), ret = 0;
-               break;
-       }
-       xnarch_memory_barrier();
-
-       return ret;
-}
-
-#define xnarch_atomic_xchg(ptr,x)                                      \
-    ({                                                                 \
-           __typeof__(*(ptr)) _x_ = (x);                               \
-           (__typeof__(*(ptr)))                                        \
-                   __xnarch_xchg((ptr),(unsigned long)_x_, sizeof(*(ptr))); \
-    })
-
-static inline void xnarch_atomic_inc(xnarch_atomic_t *v)
-{
-       unsigned long tmp;
-       unsigned long result;
-
-       __asm__ __volatile__("@ atomic_add\n"
-"1:    ldrex   %0, [%3]\n"
-"      add     %0, %0, %4\n"
-"      strex   %1, %0, [%3]\n"
-"      teq     %1, #0\n"
-"      bne     1b"
-       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-       : "r" (&v->counter), "Ir" (1)
-       : "cc");
-}
-
-static inline void xnarch_atomic_dec(xnarch_atomic_t *v)
-{
-       unsigned long tmp;
-       unsigned long result;
-
-       __asm__ __volatile__("@ atomic_sub\n"
-"1:    ldrex   %0, [%3]\n"
-"      sub     %0, %0, %4\n"
-"      strex   %1, %0, [%3]\n"
-"      teq     %1, #0\n"
-"      bne     1b"
-       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-       : "r" (&v->counter), "Ir" (1)
-       : "cc");
-}
-
-static inline void
-xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
 {
     unsigned long tmp, tmp2;
 
     __asm__ __volatile__("@ atomic_set_mask\n"
-"1:    ldrex   %0, [%3]\n\t"
-"      orr     %0, %0, %4\n\t"
-"      strex   %1, %0, [%3]\n"
-"      teq     %1, #0\n"
-"      bne     1b"
-       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-       : "r" (addr), "Ir" (mask)
-       : "cc");
-}
-
-static inline void
-xnarch_atomic_clear_mask(unsigned long *addr, unsigned long mask)
-{
-       unsigned long tmp, tmp2;
-
-       __asm__ __volatile__("@ atomic_clear_mask\n"
-"1:    ldrex   %0, [%3]\n"
-"      bic     %0, %0, %4\n"
-"      strex   %1, %0, [%3]\n"
-"      teq     %1, #0\n"
-"      bne     1b"
-       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-       : "r" (addr), "Ir" (mask)
-       : "cc");
-}
-
-static inline unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *ptr,
-                     unsigned long oldval, unsigned long newval)
-{
-       unsigned long curval, res;
-
-       xnarch_memory_barrier();
-
-       do {
-               __asm__ __volatile__("@ atomic_cmpxchg\n"
-               "ldrex  %1, [%3]\n"
-               "mov    %0, #0\n"
-               "teq    %1, %4\n"
-               "strexeq %0, %5, [%3]\n"
-                   : "=&r" (res), "=&r" (curval), "+Qo" (ptr->counter)
-                   : "r" (&ptr->counter), "Ir" (oldval), "r" (newval)
-                   : "cc");
-       } while (res);
-
-       xnarch_memory_barrier();
-
-       return curval;
-}
-
-static inline int xnarch_atomic_inc_and_test(xnarch_atomic_t *v)
-{
-       unsigned long tmp;
-       unsigned long result;
-
-       xnarch_memory_barrier();
-
-       __asm__ __volatile__("@ atomic_add_return\n"
-"1:    ldrex   %0, [%3]\n"
-"      add     %0, %0, %4\n"
-"      strex   %1, %0, [%3]\n"
-"      teq     %1, #0\n"
-"      bne     1b"
-       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-       : "r" (&v->counter), "Ir" (1)
-       : "cc");
-
-       xnarch_memory_barrier();
-
-       return result == 0;
+                        "1:    ldrex   %0, [%3]\n\t"
+                        "      orr     %0, %0, %4\n\t"
+                        "      strex   %1, %0, [%3]\n"
+                        "      teq     %1, #0\n"
+                        "      bne     1b"
+                        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+                        : "r" (addr), "Ir" (mask)
+                        : "cc");
 }
-
-static inline int xnarch_atomic_dec_and_test(xnarch_atomic_t *v)
-{
-       unsigned long tmp;
-       unsigned long result;
-
-       xnarch_memory_barrier();
-
-       __asm__ __volatile__("@ atomic_sub_return\n"
-"1:    ldrex   %0, [%3]\n"
-"      sub     %0, %0, %4\n"
-"      strex   %1, %0, [%3]\n"
-"      teq     %1, #0\n"
-"      bne     1b"
-       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-       : "r" (&v->counter), "Ir" (1)
-       : "cc");
-
-       xnarch_memory_barrier();
-
-       return result == 0;
-}
-#else /* ARM arch <= 5 */
-
-#ifdef __KERNEL__
-
-#include <linux/bitops.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-static inline void
-xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
+#else /* arm <= armv5 */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
 {
-    unsigned long flags;
+       unsigned long flags;
 
-    local_irq_save_hw(flags);
-    *addr |= mask;
-    local_irq_restore_hw(flags);
+       local_irq_save_hw(flags);
+       *addr |= mask;
+       local_irq_restore_hw(flags);
 }
-
-#define xnarch_memory_barrier() smp_mb()
-#define xnarch_atomic_xchg(ptr,x) xchg(ptr,x)
-#define xnarch_atomic_inc(pcounter) \
-       atomic_inc((atomic_t *)pcounter)
-#define xnarch_atomic_dec(pcounter) \
-       atomic_dec((atomic_t *)pcounter)
-#define xnarch_atomic_clear_mask(addr, mask) \
-       atomic_clear_mask((mask), (addr))
-#define xnarch_atomic_cmpxchg(pcounter, oldval, newval) \
-       atomic_cmpxchg((atomic_t *)(pcounter), (oldval), (newval))
-#define xnarch_atomic_inc_and_test(pcounter) \
-       atomic_inc_and_test((atomic_t *)pcounter)
-#define xnarch_atomic_dec_and_test(pcounter) \
-       atomic_dec_and_test((atomic_t *)pcounter)
+#endif /* arm <= armv5 */
 
 #else /* !__KERNEL__ */
+#include <asm/xenomai/features.h>
 
-#include <asm/xenomai/syscall.h>
-#include <nucleus/compiler.h>
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
-    unsigned long ret;
-
-    if (size != 4) {
-       __xnarch_xchg_called_with_bad_pointer();
-       return 0;
-    }
-
-#if defined(CONFIG_XENO_ARM_SA1100)
-    XENOMAI_SYSCALL5(__xn_sys_arch,
-                    XENOMAI_SYSARCH_XCHG, ptr, x, size, &ret);
-#else
-    asm volatile("@ __xchg4\n"
-"   swp            %0, %1, [%2]"
-    : "=&r" (ret)
-    : "r" (x), "r" (ptr)
-    : "memory", "cc");
-#endif
-    return ret;
-}
-
-#define xnarch_atomic_xchg(ptr,x) \
-    ({                                                                        \
-    __typeof__(*(ptr)) _x_ = (x);                                         \
-    (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
-    })
-
-
-#ifdef CONFIG_SMP
-static __inline__ unsigned long
-xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
-{
-    unsigned long ret;
-
-    XENOMAI_SYSCALL4(__xn_sys_arch,
-                    XENOMAI_SYSARCH_ATOMIC_ADD_RETURN, i, v, &ret);
-    return ret;
-}
-
-static __inline__ unsigned long
-xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
-{
-    unsigned long ret;
-
-    XENOMAI_SYSCALL4(__xn_sys_arch,
-                    XENOMAI_SYSARCH_ATOMIC_ADD_RETURN, -i, v, &ret);
-    return ret;
-}
-
-static inline void
-xnarch_atomic_set_mask(unsigned long *addr, unsigned long mask)
-{
-    XENOMAI_SYSCALL3(__xn_sys_arch,
-                    XENOMAI_SYSARCH_ATOMIC_SET_MASK, mask, addr);
-}
-
-static inline void
-xnarch_atomic_clear_mask(unsigned long *addr, unsigned long mask)
-{
-    XENOMAI_SYSCALL3(__xn_sys_arch,
-                    XENOMAI_SYSARCH_ATOMIC_CLEAR_MASK, mask, addr);
-}
-#else /* ARM_ARCH <= 5 && !CONFIG_SMP */
-
-static __inline__ unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *ptr,
-                     unsigned long oldval, unsigned long newval)
-{
-       register unsigned long asm_old asm("r0") = oldval;
-       register unsigned long asm_new asm("r1") = newval;
-       register unsigned long *asm_ptr asm("r2") =
-               (unsigned long *)&ptr->counter;
-       register unsigned long asm_lr asm("lr");
-       register unsigned long asm_tmp asm("r3");
-
-       do {
-               asm volatile (
-                       "mov %1, #0xffff0fff\n\t"
-                       "mov lr, pc\n\t"
-                       "add pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"
-                       : "+r"(asm_old), "=&r"(asm_tmp), "=r"(asm_lr)
-                       : "r"(asm_new), "r"(asm_ptr)
-                       : "cc", "memory");
-               if (likely(!asm_old)) {
-                       asm_old = oldval;
-                       goto done;
-               }
-       } while (unlikely((asm_old = *asm_ptr) == oldval));
-  done:
-       return asm_old;
-}
-
-static __inline__ unsigned long
-xnarch_atomic_add_return(int i, xnarch_atomic_t *v)
-{
-       register unsigned long asm_old asm("r0");
-       register unsigned long asm_new asm("r1");
-       register unsigned long *asm_ptr asm("r2") =
-               (unsigned long *)&v->counter;
-       register unsigned long asm_lr asm("lr");
-       register unsigned long asm_tmp asm("r3");
-
-       asm volatile ( \
-               "1: @ xnarch_atomic_add\n\t"
-               "ldr    %0, [%4]\n\t"
-               "mov    %1, #0xffff0fff\n\t"
-               "add    lr, pc, #4\n\t"
-               "add    %3, %0, %5\n\t"
-               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"
-               "bcc    1b"
-               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new)
-               : "r" (asm_ptr), "rIL"(i)
-               : "ip", "cc", "memory");
-       return asm_new;
-}
-
-static __inline__ unsigned long
-xnarch_atomic_sub_return(int i, xnarch_atomic_t *v)
-{
-       register unsigned long asm_old asm("r0");
-       register unsigned long asm_new asm("r1");
-       register unsigned long *asm_ptr asm("r2") =
-               (unsigned long *)&v->counter;
-       register unsigned long asm_lr asm("lr");
-       register unsigned long asm_tmp asm("r3");
-
-       asm volatile ( \
-               "1: @ xnarch_atomic_sub\n\t"
-               "ldr    %0, [%4]\n\t"
-               "mov    %1, #0xffff0fff\n\t"
-               "add    lr, pc, #4\n\t"
-               "sub    %3, %0, %5\n\t"
-               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"
-               "bcc    1b"
-               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new)
-               : "r" (asm_ptr), "rIL"(i)
-               : "ip", "cc", "memory");
-       return asm_new;
-}
-
-static __inline__ void
-xnarch_atomic_set_mask(unsigned long *v, long mask)
-{
-       register unsigned long asm_old asm("r0");
-       register unsigned long asm_new asm("r1");
-       register unsigned long *asm_ptr asm("r2") = v;
-       register unsigned long asm_lr asm("lr");
-       register unsigned long asm_tmp asm("r3");
-
-       asm volatile ( \
-               "1: @ xnarch_atomic_set_mask\n\t" \
-               "ldr    %0, [%4]\n\t" \
-               "mov    %1, #0xffff0fff\n\t" \
-               "add    lr, pc, #4\n\t" \
-               "orr    %3, %0, %5\n\t"\
-               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t" \
-               "bcc    1b" \
-               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new) \
-               : "r" (asm_ptr), "rIL"(mask) \
-               : "ip", "cc", "memory");
-}
-
-static __inline__ void
-xnarch_atomic_clear_mask(unsigned long *v, long mask)
-{
-       register unsigned long asm_old asm("r0");
-       register unsigned long asm_new asm("r1");
-       register unsigned long *asm_ptr asm("r2") = v;
-       register unsigned long asm_lr asm("lr");
-       register unsigned long asm_tmp asm("r3");
-
-       asm volatile ( \
-               "1: @ xnarch_atomic_clear_mask\n\t" \
-               "ldr    %0, [%4]\n\t" \
-               "mov    %1, #0xffff0fff\n\t" \
-               "add    lr, pc, #4\n\t" \
-               "bic    %3, %0, %5\n\t" \
-               "add    pc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t" \
-               "bcc    1b" \
-               : "=&r" (asm_old), "=&r"(asm_tmp), "=r"(asm_lr), "=r"(asm_new) \
-               : "r" (asm_ptr), "rIL"(mask) \
-               : "ip", "cc", "memory");
-}
-
-
-#endif /* ARM_ARCH <= 5 && !CONFIG_SMP */
-
-#if defined(CONFIG_SMP) && defined(CONFIG_XENO_CPU_XSC3)
-#define xnarch_memory_barrier() \
-       __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-                             : /* */ : "r" (0) : "memory")
-#else /* !XSC3 || !SMP */
+#if __LINUX_ARM_ARCH__ <= 5 || !defined(CONFIG_SMP)
 #define xnarch_memory_barrier() \
-       __asm__ __volatile__ ("": /* */ : /* */ :"memory")
-#endif /* !XSC3 || !SMP */
-
-#define cpu_relax()                            xnarch_memory_barrier()
+       __asm__ __volatile__ ("": /* */ : /* */ :"memory")
+#endif /* arm <= armv5 || !CONFIG_SMP */
 
-#define xnarch_atomic_inc(pcounter)            (void) xnarch_atomic_add_return(1, pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)   (xnarch_atomic_sub_return(1, pcounter) == 0)
-#endif /* __KERNEL__ */
-#endif /* ARM arch <= 5 */
+#endif /* !__KERNEL__ */
 
-typedef unsigned long atomic_flags_t;
+#include <asm-generic/xenomai/atomic.h>
 
 #endif /* !_XENO_ASM_ARM_ATOMIC_H */
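On ARMv6 and later, the only helper left in the arch header is
atomic_set_mask(), implemented with a ldrex/strex retry loop; everything else
now comes from the generic header. As a rough user-space analogue, assuming
the GCC atomic builtins, the same atomic OR can be written portably — on
ARMv6+ the compiler emits a comparable exclusive-load/store loop:

/* Stand-alone sketch: atomically OR a mask into a word, the effect of the
 * ldrex/strex loop above. set_mask here is a hypothetical helper name. */
#include <stdio.h>

static void set_mask(unsigned long mask, unsigned long *addr)
{
	__sync_fetch_and_or(addr, mask);	/* atomic read-modify-write */
}

int main(void)
{
	unsigned long flags = 0x1;

	set_mask(0x4, &flags);
	printf("flags = 0x%lx\n", flags);	/* prints 0x5 */
	return 0;
}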
diff --git a/include/asm-blackfin/atomic.h b/include/asm-blackfin/atomic.h
index 3e34b2f..6b7fc06 100644
--- a/include/asm-blackfin/atomic.h
+++ b/include/asm-blackfin/atomic.h
@@ -21,61 +21,20 @@
 #define _XENO_ASM_BLACKFIN_ATOMIC_H
 
 #ifdef __KERNEL__
-
-#include <linux/version.h>
-#include <linux/bitops.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-#define xnarch_atomic_xchg(ptr,v)      xchg(ptr,v)
-#define xnarch_memory_barrier()                smp_mb()
-#define xnarch_read_memory_barrier()   rmb()
-#define xnarch_write_memory_barrier()  wmb()
-
-#define xnarch_atomic_set(pcounter,i)           atomic_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)             atomic_read(pcounter)
-#define xnarch_atomic_inc(pcounter)             atomic_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)             atomic_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter)    atomic_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)    atomic_dec_and_test(pcounter)
+#include <asm/xenomai/hal.h>
 
 #define xnarch_atomic_set_mask(pflags, mask)   \
        rthal_atomic_set_mask((pflags), (mask))
 
-#define xnarch_atomic_clear_mask(pflags, mask) \
+#define xnarch_atomic_clear_mask(pflags, mask)                 \
        rthal_atomic_clear_mask((pflags), (mask))
 
-#define xnarch_atomic_cmpxchg(pcounter, old, new) \
-       atomic_cmpxchg((pcounter), (old), (new))
-
-typedef atomic_t atomic_counter_t;
-typedef atomic_t xnarch_atomic_t;
-
 #else /* !__KERNEL__ */
 
-#include <asm/xenomai/features.h>
-#include <asm/xenomai/syscall.h>
-
-typedef struct { int counter; } xnarch_atomic_t;
-
-#define xnarch_atomic_get(v)           ((v)->counter)
-#define xnarch_atomic_set(v, i)        (((v)->counter) = i)
-
-static __inline__ unsigned long xnarch_atomic_xchg(unsigned long *ptr, unsigned long x)
-{
-       unsigned long oldval;
-       XENOMAI_SYSCALL4(__xn_sys_arch,__xn_lsys_xchg,ptr,x,&oldval);
-       return oldval;
-}
-
 #define xnarch_memory_barrier()     __asm__ __volatile__("": : :"memory")
 
-#define cpu_relax()                    xnarch_memory_barrier()
-#define xnarch_read_memory_barrier()   xnarch_memory_barrier()
-#define xnarch_write_memory_barrier()  xnarch_memory_barrier()
-
 #endif /* __KERNEL__ */
 
-typedef unsigned long atomic_flags_t;
+#include <asm-generic/xenomai/atomic.h>
 
 #endif /* !_XENO_ASM_BLACKFIN_ATOMIC_H */
diff --git a/include/asm-generic/Makefile.am b/include/asm-generic/Makefile.am
index 700d0f0..d9241f3 100644
--- a/include/asm-generic/Makefile.am
+++ b/include/asm-generic/Makefile.am
@@ -3,6 +3,7 @@ noinst_HEADERS = sem_heap.h
 
 includesub_HEADERS = \
        arith.h \
+       atomic.h \
        features.h \
        hal.h \
        pci_ids.h \
diff --git a/include/asm-generic/Makefile.in b/include/asm-generic/Makefile.in
index 2251abb..5fd4f84 100644
--- a/include/asm-generic/Makefile.in
+++ b/include/asm-generic/Makefile.in
@@ -280,6 +280,7 @@ includesubdir = $(includedir)/asm-generic
 noinst_HEADERS = sem_heap.h
 includesub_HEADERS = \
        arith.h \
+       atomic.h \
        features.h \
        hal.h \
        pci_ids.h \
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 0000000..5ab1e70
--- /dev/null
+++ b/include/asm-generic/atomic.h
@@ -0,0 +1,85 @@
+#ifndef ATOMIC_H
+#define ATOMIC_H
+
+#include <asm/xenomai/features.h>
+
+typedef unsigned long atomic_flags_t;
+
+#ifdef __KERNEL__
+#include <linux/bitops.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/xenomai/wrappers.h>
+
+typedef atomic_long_t atomic_counter_t;
+typedef atomic_long_t xnarch_atomic_t;
+
+#define xnarch_memory_barrier()                smp_mb()
+#define xnarch_read_memory_barrier()    rmb()
+#define xnarch_write_memory_barrier()   wmb()
+
+#define xnarch_atomic_set(pcounter,i)  atomic_long_set(pcounter,i)
+#define xnarch_atomic_get(pcounter)    atomic_long_read(pcounter)
+#define xnarch_atomic_inc(pcounter)    atomic_long_inc(pcounter)
+#define xnarch_atomic_dec(pcounter)    atomic_long_dec(pcounter)
+#define xnarch_atomic_inc_and_test(pcounter) \
+       atomic_long_inc_and_test(pcounter)
+#define xnarch_atomic_dec_and_test(pcounter) \
+       atomic_long_dec_and_test(pcounter)
+#define xnarch_atomic_cmpxchg(pcounter,old,new) \
+       atomic_long_cmpxchg((pcounter),(old),(new))
+
+#define xnarch_atomic_xchg(ptr,x)      xchg(ptr,x)
+
+/* atomic_set_mask, atomic_clear_mask are not standard among linux
+   ports */
+#ifndef xnarch_atomic_set_mask
+#define xnarch_atomic_set_mask(pflags,mask) atomic_set_mask((mask),(pflags))
+#endif
+
+#ifndef xnarch_atomic_clear_mask
+#define xnarch_atomic_clear_mask(pflags,mask) atomic_clear_mask((mask),(pflags))
+#endif
+
+#else /* !__KERNEL__ */
+#include <xeno_config.h>
+
+#ifndef xnarch_atomic_t
+typedef struct { unsigned long counter; } __xnarch_atomic_t;
+#define xnarch_atomic_t __xnarch_atomic_t
+#endif
+
+#ifndef xnarch_memory_barrier
+#define xnarch_memory_barrier() __sync_synchronize()
+#endif
+
+#ifndef xnarch_read_memory_barrier
+#define xnarch_read_memory_barrier() xnarch_memory_barrier()
+#endif
+
+#ifndef xnarch_write_memory_barrier
+#define xnarch_write_memory_barrier() xnarch_memory_barrier()
+#endif
+
+#ifndef cpu_relax
+#define cpu_relax() xnarch_memory_barrier()
+#endif
+
+#ifndef xnarch_atomic_get
+#define xnarch_atomic_get(v)           ((v)->counter)
+#endif
+
+#ifndef xnarch_atomic_set
+#define xnarch_atomic_set(v,i)         (((v)->counter) = (i))
+#endif
+
+#ifndef xnarch_atomic_cmpxchg
+#define xnarch_atomic_cmpxchg(v, o, n)                 \
+       __sync_val_compare_and_swap(&(v)->counter,      \
+                                   (unsigned long)(o), \
+                                   (unsigned long)(n))
+#endif
+
+#endif /* !__KERNEL__ */
+
+#endif /* ATOMIC_H */
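The new generic header fills in only what an arch header has not already
provided: every user-space fallback is guarded by #ifndef, so an arch header
that defines, say, xnarch_memory_barrier() before the final include keeps its
own version. A self-contained illustration of that scheme, with mimicked names
rather than the real headers:

/* Demo of the #ifndef-override pattern used by asm-generic/atomic.h.
 * All names here are made up for the illustration. */
#include <stdio.h>

/* "arch header" part: a specialized definition comes first */
#define my_memory_barrier() __asm__ __volatile__("" ::: "memory")

/* "generic header" part: fallbacks step aside when already defined */
#ifndef my_memory_barrier
#define my_memory_barrier() __sync_synchronize()
#endif

#ifndef my_read_memory_barrier
#define my_read_memory_barrier() my_memory_barrier()
#endif

int main(void)
{
	my_memory_barrier();		/* arch-specific version wins */
	my_read_memory_barrier();	/* generic fallback applies */
	puts("both barriers compiled against the expected definitions");
	return 0;
}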
diff --git a/include/asm-nios2/atomic.h b/include/asm-nios2/atomic.h
index b7bea91..3a975cd 100644
--- a/include/asm-nios2/atomic.h
+++ b/include/asm-nios2/atomic.h
@@ -21,15 +21,7 @@
 #define _XENO_ASM_NIOS2_ATOMIC_H
 
 #ifdef __KERNEL__
-
-#include <linux/bitops.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-#define xnarch_atomic_xchg(ptr,v)      xchg(ptr,v)
-#define xnarch_memory_barrier()                smp_mb()
-#define xnarch_read_memory_barrier()   rmb()
-#define xnarch_write_memory_barrier()  wmb()
+#include <asm/irqflags.h>
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
 {
@@ -40,44 +32,10 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
        local_irq_restore_hw(flags);
 }
 
-#define xnarch_atomic_set(pcounter,i)          atomic_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)            atomic_read(pcounter)
-#define xnarch_atomic_inc(pcounter)            atomic_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)            atomic_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter)   atomic_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)   atomic_dec_and_test(pcounter)
-#define xnarch_atomic_set_mask(pflags,mask)    atomic_set_mask(mask, pflags)
-#define xnarch_atomic_clear_mask(pflags,mask)  atomic_clear_mask(mask, pflags)
-#define xnarch_atomic_cmpxchg(pcounter,old,new) atomic_cmpxchg((pcounter), (old), (new))
-
-typedef atomic_t atomic_counter_t;
-typedef atomic_t xnarch_atomic_t;
-
 #else /* !__KERNEL__ */
 
-#include <asm/xenomai/features.h>
-#include <asm/xenomai/syscall.h>
-
-typedef struct { int counter;} xnarch_atomic_t;
-
-#define xnarch_atomic_get(v)           ((v)->counter)
-#define xnarch_atomic_set(v, i)                (((v)->counter) = i)
-
-static __inline__ unsigned long xnarch_atomic_xchg(unsigned long *ptr, unsigned long x)
-{
-       unsigned long oldval;
-       XENOMAI_SYSCALL4(__xn_sys_arch, __xn_lsys_xchg, ptr, x, &oldval);
-       return oldval;
-}
-
 #define xnarch_memory_barrier()     __asm__ __volatile__("": : :"memory")
 
-#define cpu_relax()                    xnarch_memory_barrier()
-#define xnarch_read_memory_barrier()   xnarch_memory_barrier()
-#define xnarch_write_memory_barrier()  xnarch_memory_barrier()
-
 #endif /* __KERNEL__ */
 
-typedef unsigned long atomic_flags_t;
-
 #endif /* !_XENO_ASM_NIOS2_ATOMIC_H */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 42c3d72..6b689e0 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -23,21 +23,13 @@
 #ifndef _XENO_ASM_POWERPC_ATOMIC_H
 #define _XENO_ASM_POWERPC_ATOMIC_H
 
-#ifdef __KERNEL__
-
-#include <linux/bitops.h>
-#include <linux/version.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
+typedef unsigned long atomic_flags_t;
 
-#define xnarch_atomic_xchg(ptr,v)      xchg(ptr,v)
-#define xnarch_memory_barrier()                smp_mb()
-#define xnarch_read_memory_barrier()   rmb()
-#define xnarch_write_memory_barrier()  wmb()
+#ifdef __KERNEL__
 
 #ifdef CONFIG_PPC64
 static __inline__ void atomic64_clear_mask(unsigned long mask,
-                                        unsigned long *ptr)
+                                          unsigned long *ptr)
 {
     __asm__ __volatile__ ("\n\
 1:     ldarx   5,0,%0 \n\
@@ -48,9 +40,10 @@ static __inline__ void atomic64_clear_mask(unsigned long mask,
        : "r" (ptr), "r" (mask)
        : "r5", "cc", "memory");
 }
+#define xnarch_atomic_clear_mask(pflags,mask)  atomic64_clear_mask(mask,pflags)
 
 static __inline__ void atomic64_set_mask(unsigned long mask,
-                                      unsigned long *ptr)
+                                        unsigned long *ptr)
 {
     __asm__ __volatile__ ("\n\
 1:     ldarx   5,0,%0 \n\
@@ -61,205 +54,15 @@ static __inline__ void atomic64_set_mask(unsigned long mask,
        : "r" (ptr), "r" (mask)
        : "r5", "cc", "memory");
 }
-
-#define xnarch_atomic_set(pcounter,i)          atomic64_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)            atomic64_read(pcounter)
-#define xnarch_atomic_inc(pcounter)            atomic64_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)            atomic64_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter)   atomic64_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)   atomic64_dec_and_test(pcounter)
 #define xnarch_atomic_set_mask(pflags,mask)    atomic64_set_mask(mask,pflags)
-#define xnarch_atomic_clear_mask(pflags,mask)  atomic64_clear_mask(mask,pflags)
-#define xnarch_atomic_cmpxchg(pcounter,old,new) \
-      atomic64_cmpxchg((pcounter),(old),(new))
-
-typedef atomic64_t atomic_counter_t;
-typedef atomic64_t xnarch_atomic_t;
 
 #else /* !CONFIG_PPC64 */
  /* These are defined in arch/{ppc,powerpc}/kernel/misc[_32].S on 32-bit PowerPC */
 void atomic_set_mask(unsigned long mask, unsigned long *ptr);
 void atomic_clear_mask(unsigned long mask, unsigned long *ptr);
 
-#define xnarch_atomic_set(pcounter,i)          atomic_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)            atomic_read(pcounter)
-#define xnarch_atomic_inc(pcounter)            atomic_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)            atomic_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter)   atomic_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)   atomic_dec_and_test(pcounter)
-#define xnarch_atomic_set_mask(pflags,mask)    atomic_set_mask(mask,pflags)
-#define xnarch_atomic_clear_mask(pflags,mask)  atomic_clear_mask(mask,pflags)
-#define xnarch_atomic_cmpxchg(pcounter,old,new) \
-      atomic_cmpxchg((pcounter),(old),(new))
-
-typedef atomic_t atomic_counter_t;
-typedef atomic_t xnarch_atomic_t;
-
 #endif /* !CONFIG_PPC64 */
 
-#else /* !__KERNEL__ */
-
-#ifndef __powerpc64__
-typedef struct { unsigned int counter; } xnarch_atomic_t;
-/* Always enable the work-around for 405 boards in user-space for
-   now. */
-#define PPC405_ERR77(ra,rb)    "dcbt " #ra "," #rb ";"
-#else /* __powerpc64__ */
-typedef struct { unsigned long counter; } xnarch_atomic_t;
-#define PPC405_ERR77(ra,rb)
-#endif /* !__powerpc64__ */
-
-#define xnarch_atomic_get(v)   ((v)->counter)
-#define xnarch_atomic_set(v, i)        (((v)->counter) = (i))
-
-#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP    "eieio\n"
-#define ISYNC_ON_SMP    "\n\tisync"
-#ifdef __powerpc64__
-#define LWSYNC_ON_SMP    "lwsync\n"
-#else
-#define LWSYNC_ON_SMP    "sync\n"
-#endif
-#else
-#define EIEIO_ON_SMP
-#define ISYNC_ON_SMP
-#define LWSYNC_ON_SMP
-#endif
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- *
- * (lifted from linux/include/asm-powerpc/system.h)
- */
-
-static __inline__ unsigned long
-    __xchg_u32(volatile void *p, unsigned long val)
-{
-    unsigned long prev;
-
-    __asm__ __volatile__(
-    EIEIO_ON_SMP
-"1: lwarx      %0,0,%2 \n"
-    PPC405_ERR77(0,%2)
-"   stwcx.     %3,0,%2 \n\
-    bne-       1b"
-    ISYNC_ON_SMP
-    : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
-    : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
-    : "cc", "memory");
-
-    return prev;
-}
-
-#if defined(__powerpc64__)
-static __inline__ unsigned long
-    __xchg_u64(volatile void *p, unsigned long val)
-{
-    unsigned long prev;
-
-    __asm__ __volatile__(
-    EIEIO_ON_SMP
-"1: ldarx      %0,0,%2 \n"
-    PPC405_ERR77(0,%2)
-"   stdcx.     %3,0,%2 \n\
-    bne-       1b"
-    ISYNC_ON_SMP
-    : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
-    : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
-    : "cc", "memory");
-
-    return prev;
-}
-#endif
-
-static __inline__ unsigned long
-    __xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
-    switch (size) {
-    case 4:
-       return __xchg_u32(ptr, x);
-#if defined(__powerpc64__)
-    case 8:
-       return __xchg_u64(ptr, x);
-#endif
-    }
-    return x;
-}
-
-#define xnarch_atomic_xchg(ptr,x) \
-    ({                                                                        \
-       __typeof__(*(ptr)) _x_ = (x);                                          \
-       (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));\
-    })
-
-#define xnarch_memory_barrier()                __asm__ __volatile__ ("sync" : : : "memory")
-#define xnarch_read_memory_barrier()   xnarch_memory_barrier() /* lwsync would do */
-#define xnarch_write_memory_barrier()  xnarch_memory_barrier()
-#define cpu_relax()                    xnarch_memory_barrier()
-
-#ifdef __powerpc64__
-static __inline__ unsigned long
-__do_cmpxchg(volatile unsigned long *p,
-            unsigned long old, unsigned long newval)
-{
-       unsigned long prev;
-
-       __asm__ __volatile__ (
-       LWSYNC_ON_SMP
-"1:    ldarx   %0,0,%2         # __cmpxchg_u64\n\
-       cmpd    0,%0,%3\n\
-       bne-    2f\n\
-       stdcx.  %4,0,%2\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       "\n\
-2:"
-       : "=&r" (prev), "+m" (*p)
-       : "r" (p), "r" (old), "r" (newval)
-       : "cc", "memory");
-
-       return prev;
-}
-#else
-static __inline__ unsigned long
-__do_cmpxchg(volatile unsigned int *p,
-            unsigned long old, unsigned long newval)
-{
-       unsigned int prev;
-
-       __asm__ __volatile__ (
-       LWSYNC_ON_SMP
-"1:    lwarx   %0,0,%2         # __cmpxchg_u32\n\
-       cmpw    0,%0,%3\n\
-       bne-    2f\n"
-       PPC405_ERR77(0,%2)
-"      stwcx.  %4,0,%2\n\
-       bne-    1b"
-       ISYNC_ON_SMP
-       "\n\
-2:"
-       : "=&r" (prev), "+m" (*p)
-       : "r" (p), "r" (old), "r" (newval)
-       : "cc", "memory");
-
-       return prev;
-}
-#endif
-
-#include <asm/xenomai/features.h>
-
-static __inline__ unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *p,
-                     unsigned long old, unsigned long newval)
-{
-       return __do_cmpxchg(&p->counter, old, newval);
-}
-
 #endif /* __KERNEL__ */
 
-typedef unsigned long atomic_flags_t;
-
 #endif /* !_XENO_ASM_POWERPC_ATOMIC_H */
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 686e8d8..a3cb35b 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -21,76 +21,16 @@
 #define _XENO_ASM_SH_ATOMIC_H
 
 #ifdef __KERNEL__
-
-#include <linux/bitops.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
-
-#define xnarch_atomic_xchg(ptr,v)      xchg(ptr,v)
-#define xnarch_memory_barrier()                smp_mb()
-#define xnarch_read_memory_barrier()   rmb()
-#define xnarch_write_memory_barrier()  wmb()
-
-#define xnarch_atomic_set(pcounter,i)          atomic_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)            atomic_read(pcounter)
-#define xnarch_atomic_inc(pcounter)            atomic_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)            atomic_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter)   atomic_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter)   atomic_dec_and_test(pcounter)
-#define xnarch_atomic_set_mask(pflags,mask)    atomic_set_mask(mask, (atomic_t *)(pflags))
-#define xnarch_atomic_clear_mask(pflags,mask)  atomic_clear_mask(mask, (atomic_t *)(pflags))
-#define xnarch_atomic_cmpxchg(pcounter,old,new) atomic_cmpxchg((pcounter), (old), (new))
-
-typedef atomic_t atomic_counter_t;
-typedef atomic_t xnarch_atomic_t;
 
-#else /* !__KERNEL__ */
+#define xnarch_atomic_set_mask(pflags,mask) \
+       atomic_set_mask(mask, (atomic_t *)(pflags))
+#define xnarch_atomic_clear_mask(pflags,mask) \
+       atomic_clear_mask(mask, (atomic_t *)(pflags))
 
-#include <asm/xenomai/features.h>
+#else /* !__KERNEL__ */
 #include <endian.h>
 
-typedef struct { int counter; } xnarch_atomic_t;
-
-#define xnarch_atomic_get(v)           ((v)->counter)
-#define xnarch_atomic_set(v, i)                (((v)->counter) = i)
-
-/*
- * Shamelessly lifted from the gUSA-compliant xchg() code in kernel
- * space. NOTE: we DO need GUSA_RB available on the platform for this
- * to work.
- */
-static inline unsigned long xchg_u32(volatile unsigned int *m, unsigned long val)
-{
-       unsigned long retval;
-
-       __asm__ __volatile__ (
-               "   .align 2              \n\t"
-               "   mova    1f,   r0      \n\t" /* r0 = end point */
-               "   nop                   \n\t"
-               "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-               "   mov    #-4,   r15     \n\t" /* LOGIN */
-               "   mov.l  @%1,   %0      \n\t" /* load  old value */
-               "   mov.l   %2,   @%1     \n\t" /* store new value */
-               "1: mov     r1,   r15     \n\t" /* LOGOUT */
-               : "=&r" (retval),
-                 "+r"  (m)
-               : "r"   (val)
-               : "memory", "r0", "r1");
-
-       return retval;
-}
-
-#define __do_xchg(ptr, x)                              \
-({                                                     \
-       unsigned long __xchg__res;                      \
-       volatile void *__xchg_ptr = (ptr);              \
-       __xchg__res = xchg_u32(__xchg_ptr, x);          \
-       __xchg__res;                                    \
-})
-
-#define xnarch_atomic_xchg(ptr,x)      \
-       ((__typeof__(*(ptr)))__do_xchg((ptr),(unsigned long)(x)))
-
 static inline unsigned long long load_u64(volatile void *p)
 {
        union {
@@ -129,12 +69,6 @@ static inline unsigned long long load_u64(volatile void *p)
 
 #define xnarch_memory_barrier()     __asm__ __volatile__("": : :"memory")
 
-#define cpu_relax()                    xnarch_memory_barrier()
-#define xnarch_read_memory_barrier()   xnarch_memory_barrier()
-#define xnarch_write_memory_barrier()  xnarch_memory_barrier()
-
 #endif /* __KERNEL__ */
 
-typedef unsigned long atomic_flags_t;
-
 #endif /* !_XENO_ASM_SH_ATOMIC_H */
diff --git a/include/asm-x86/atomic.h b/include/asm-x86/atomic.h
index cc88de5..81a8218 100644
--- a/include/asm-x86/atomic.h
+++ b/include/asm-x86/atomic.h
@@ -22,41 +22,15 @@
 
 #include <asm/xenomai/features.h>
 
-typedef unsigned long atomic_flags_t;
-
 #ifdef __KERNEL__
-
-#include <linux/bitops.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/xenomai/wrappers.h>
-
-#define xnarch_atomic_set(pcounter,i)  atomic_long_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)    atomic_long_read(pcounter)
-#define xnarch_atomic_inc(pcounter)    atomic_long_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)    atomic_long_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter) \
-       atomic_long_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter) \
-       atomic_long_dec_and_test(pcounter)
-#define xnarch_atomic_cmpxchg(pcounter,old,new) \
-       atomic_long_cmpxchg((pcounter),(old),(new))
 
-typedef atomic_long_t atomic_counter_t;
-typedef atomic_long_t xnarch_atomic_t;
-
-#define xnarch_atomic_set_mask(pflags,mask) \
+#define xnarch_atomic_set_mask(pflags,mask)            \
        atomic_set_mask((mask),(unsigned *)(pflags))
-#define xnarch_atomic_clear_mask(pflags,mask) \
+#define xnarch_atomic_clear_mask(pflags,mask)          \
        atomic_clear_mask((mask),(unsigned *)(pflags))
-#define xnarch_atomic_xchg(ptr,x)      xchg(ptr,x)
-
-#define xnarch_memory_barrier()                smp_mb()
-#define xnarch_read_memory_barrier()    rmb()
-#define xnarch_write_memory_barrier()   wmb()
-
-#else /* !__KERNEL__ */
 
+#else /* !__KERNEL__ */
 #include <xeno_config.h>
 
 #ifdef CONFIG_SMP
@@ -65,45 +39,10 @@ typedef atomic_long_t xnarch_atomic_t;
 #define LOCK_PREFIX ""
 #endif
 
-typedef struct { unsigned long counter; } xnarch_atomic_t;
-
-#define xnarch_atomic_get(v)           ((v)->counter)
-
-#define xnarch_atomic_set(v,i)         (((v)->counter) = (i))
-
-static inline void cpu_relax(void)
-{
-       asm volatile("rep; nop" ::: "memory");
-}
+#define cpu_relax() asm volatile("rep; nop" ::: "memory")
 
 #ifdef __i386__
 
-struct __xeno_xchg_dummy { unsigned long a[100]; };
-#define __xeno_xg(x) ((struct __xeno_xchg_dummy *)(x))
-
-static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
-                                               unsigned long x)
-{
-       __asm__ __volatile__("xchgl %0,%1"
-                            :"=r" (x)
-                            :"m" (*__xeno_xg(ptr)), "0" (x)
-                            :"memory");
-       return x;
-}
-
-static inline unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long 
newval)
-{
-       volatile void *ptr = &v->counter;
-       unsigned long prev;
-
-       __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-                            : "=a"(prev)
-                            : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
-                            : "memory");
-       return prev;
-}
-
 #define xnarch_memory_barrier()                __asm__ __volatile__("": : :"memory")
 #define xnarch_read_memory_barrier() \
        __asm__ __volatile__ (LOCK_PREFIX "addl $0,0(%%esp)": : :"memory")
@@ -112,37 +51,14 @@ xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long newva
 
 #else /* x86_64 */
 
-#define __xeno_xg(x) ((volatile long *)(x))
-
-static inline unsigned long xnarch_atomic_xchg (volatile void *ptr,
-                                               unsigned long x)
-{
-       __asm__ __volatile__("xchgq %0,%1"
-                            :"=r" (x)
-                            :"m" (*__xeno_xg(ptr)), "0" (x)
-                            :"memory");
-       return x;
-}
-
-static inline unsigned long
-xnarch_atomic_cmpxchg(xnarch_atomic_t *v, unsigned long old, unsigned long 
newval)
-{
-       volatile void *ptr = &v->counter;
-       unsigned long prev;
-
-       __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-                            : "=a"(prev)
-                            : "r"(newval), "m"(*__xeno_xg(ptr)), "0"(old)
-                            : "memory");
-       return prev;
-}
-
 #define xnarch_memory_barrier()                asm volatile("mfence":::"memory")
 #define xnarch_read_memory_barrier()   asm volatile("lfence":::"memory")
 #define xnarch_write_memory_barrier()  asm volatile("sfence":::"memory")
 
 #endif /* x86_64 */
 
-#endif /* __KERNEL__ */
+#endif /* !__KERNEL__ */
+
+#include <asm-generic/xenomai/atomic.h>
 
 #endif /* !_XENO_ASM_X86_ATOMIC_64_H */
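Note the two user-space barrier idioms this header keeps: x86_64 can rely on
mfence/lfence/sfence, while the i386 read barrier falls back to a
LOCK-prefixed add to the stack, which orders memory accesses without requiring
SSE2 (the usual reason for that idiom; the patch itself does not spell it
out). A small stand-alone sketch of both:

/* Sketch of the two x86 barrier idioms, runnable on either flavor. */
#include <stdio.h>

static inline void barrier_locked_add(void)
{
	/* i386-style full barrier: LOCK-prefixed no-op read-modify-write */
#ifdef __x86_64__
	__asm__ __volatile__("lock; addl $0,0(%%rsp)" ::: "memory", "cc");
#else
	__asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory", "cc");
#endif
}

#ifdef __x86_64__
static inline void barrier_mfence(void)
{
	__asm__ __volatile__("mfence" ::: "memory");	/* SSE2 fence */
}
#endif

int main(void)
{
	barrier_locked_add();
#ifdef __x86_64__
	barrier_mfence();
#endif
	puts("barriers executed");
	return 0;
}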

