The attached change revises the argument order of the __kernel_cmpxchg and __kernel_cmpxchg2 calls so that it matches the argument order of the corresponding light-weight syscalls: the memory operand comes first, followed by the old and new values, mirroring the r26/r25/r24/r23 argument registers of the LWS gateway. All callers were updated accordingly.
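To illustrate the new convention at a call site, here is a minimal sketch (the function name is hypothetical and not part of the patch; the loop mirrors the __sync_lock_test_and_set_4 caller in the diff below):

/* Hypothetical example: word-sized atomic exchange built on the
   reordered helper.  The memory operand now comes first, matching
   the LWS argument registers r26 (mem), r25 (old), r24 (new).  */
static int
atomic_swap_example (int *ptr, int val)
{
  int oldval, failure;

  do
    {
      oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
      failure = __kernel_cmpxchg (ptr, oldval, val);  /* mem, old, new */
    }
  while (failure != 0);

  return oldval;  /* value previously in *ptr */
}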
The clobber lists and written argument registers of the asms in these two functions were also reviewed and updated to match the kernel implementation.

Tested on hppa-unknown-linux-gnu with no observed regressions. Committed to trunk, gcc-5 and gcc-4.9.

Dave

--
John David Anglin	dave.ang...@bell.net
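As background on the written-argument point, a generic sketch of the constraint pattern follows (hypothetical helper, not code from the patch; the exact operand lists are in the diff): a register variable that the LWS gateway may overwrite must be bound as a read-write output ("+r") rather than a plain input, since the compiler assumes plain inputs survive the asm unchanged.

/* Illustrative only: call the LWS gateway at 0xb0 with two arguments.
   a0/a1 sit in argument registers the kernel may clobber, so they are
   "+r" operands.  The LWS index in r20 is loaded in the ble delay slot.  */
static long
lws_call_sketch (unsigned long arg0, unsigned long arg1)
{
  register unsigned long a0 asm("r26") = arg0;  /* may be clobbered */
  register unsigned long a1 asm("r25") = arg1;  /* may be clobbered */
  register long ret asm("r28");
  register long err asm("r21");

  asm volatile ("ble	0xb0(%%sr2, %%r0)	\n\t"
		"ldi	%4, %%r20		\n\t"
	: "=r" (ret), "=r" (err), "+r" (a0), "+r" (a1)
	: "i" (2)  /* LWS index, illustrative value */
	: "r1", "r20", "r22", "r31", "memory");

  return err ? err : ret;
}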
2015-07-01  John David Anglin  <dang...@gcc.gnu.org>

	* config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
	better match light-weight syscall argument order.
	(__kernel_cmpxchg2): Likewise.
	Adjust callers.

Index: config/pa/linux-atomic.c
===================================================================
--- config/pa/linux-atomic.c	(revision 224978)
+++ config/pa/linux-atomic.c	(working copy)
@@ -46,18 +46,17 @@
 /* Kernel helper for compare-and-exchange a 32-bit value.  */
 static inline long
-__kernel_cmpxchg (int oldval, int newval, int *mem)
+__kernel_cmpxchg (int *mem, int oldval, int newval)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
+  register int lws_old asm("r25") = oldval;
+  register int lws_new asm("r24") = newval;
   register long lws_ret asm("r28");
   register long lws_errno asm("r21");
-  register int lws_old asm("r25") = oldval;
-  register int lws_new asm("r24") = newval;
   asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
-			"ldi	%5, %%r20		\n\t"
-	: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
-	  "=r" (lws_old), "=r" (lws_new)
-	: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+			"ldi	%2, %%r20		\n\t"
+	: "=r" (lws_ret), "=r" (lws_errno)
+	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
 	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
   );
 
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -73,19 +72,20 @@
 }
 
 static inline long
-__kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
+__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
 		   int val_size)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret asm("r28");
-  register long lws_errno asm("r21");
   register unsigned long lws_old asm("r25") = (unsigned long) oldval;
   register unsigned long lws_new asm("r24") = (unsigned long) newval;
   register int lws_size asm("r23") = val_size;
+  register long lws_ret asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
-			"ldi	%2, %%r20		\n\t"
-	: "=r" (lws_ret), "=r" (lws_errno)
-	: "i" (2), "r" (lws_mem), "r" (lws_old), "r" (lws_new), "r" (lws_size)
+			"ldi	%6, %%r20		\n\t"
+	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
+	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
+	: "i" (2)
 	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -116,7 +116,7 @@
     do {							\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);		\
       newval = PFX_OP (tmp INF_OP val);				\
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);	\
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);	\
     } while (failure != 0);					\
 								\
     return tmp;							\
@@ -146,7 +146,7 @@
     do {							\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);		\
       newval = PFX_OP (tmp INF_OP val);				\
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);	\
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);	\
     } while (failure != 0);					\
 								\
     return PFX_OP (tmp INF_OP val);				\
@@ -174,7 +174,7 @@
 								\
     do {							\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);		\
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
     } while (failure != 0);					\
 								\
     return tmp;							\
@@ -195,7 +195,7 @@
 								\
     do {							\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);		\
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
     } while (failure != 0);					\
 								\
     return PFX_OP (tmp INF_OP val);				\
@@ -225,7 +225,7 @@
       if (__builtin_expect (oldval != actual_oldval, 0))	\
 	return actual_oldval;					\
 								\
-      fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX);	\
+      fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX);	\
 								\
       if (__builtin_expect (!fail, 1))				\
 	return actual_oldval;					\
@@ -236,7 +236,7 @@
   __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,	\
 					TYPE newval)		\
   {								\
-    int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX);	\
+    int failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX);	\
     return (failure != 0);					\
   }
 
@@ -255,7 +255,7 @@
   if (__builtin_expect (oldval != actual_oldval, 0))
     return actual_oldval;
 
-  fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+  fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
 
   if (__builtin_expect (!fail, 1))
     return actual_oldval;
@@ -265,7 +265,7 @@
 bool HIDDEN
 __sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
 {
-  int failure = __kernel_cmpxchg (oldval, newval, ptr);
+  int failure = __kernel_cmpxchg (ptr, oldval, newval);
   return (failure == 0);
 }
 
@@ -278,7 +278,7 @@
 								\
     do {							\
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);	\
-      failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX);	\
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);	\
     } while (failure != 0);					\
 								\
     return oldval;						\
@@ -294,7 +294,7 @@
 
   do {
     oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
-    failure = __kernel_cmpxchg (oldval, val, ptr);
+    failure = __kernel_cmpxchg (ptr, oldval, val);
   } while (failure != 0);
 
   return oldval;
@@ -308,7 +308,7 @@
 								\
     do {							\
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);	\
-      failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX);	\
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX);	\
     } while (failure != 0);					\
   }
 
@@ -321,7 +321,7 @@
   int failure, oldval;
 
   do {
-    oldval = *ptr;
-    failure = __kernel_cmpxchg (oldval, 0, ptr);
+    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
+    failure = __kernel_cmpxchg (ptr, oldval, 0);
   } while (failure != 0);
 }