(Note: Patch also attached because the inline version is certain to get line wrapped.)
While only cosmetic for x86-64, this adjusts the cmpxchg code apparently inherited from i386 to use more generic constraints.

Signed-off-by: Jan Beulich <[EMAIL PROTECTED]>

diff -Npru 2.6.13/include/asm-x86_64/system.h 2.6.13-x86_64-cmpxchg/include/asm-x86_64/system.h
--- 2.6.13/include/asm-x86_64/system.h	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-x86_64-cmpxchg/include/asm-x86_64/system.h	2005-09-01 11:32:12.000000000 +0200
@@ -247,25 +247,25 @@ static inline unsigned long __cmpxchg(vo
 	case 1:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 2:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 4:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 8:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	}
linux-2.6.13-x86_64-cmpxchg.patch
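For context on why the constraint change is safe: the "q" constraint restricts operands to %eax/%ebx/%ecx/%edx, which on i386 are the only registers with addressable byte subregisters; on x86-64 every general-purpose register has a byte subregister (via the REX prefix), so the looser "r" constraint works for all four operand sizes. Below is a minimal, hypothetical user-space sketch of the byte case using the "r" constraint, in the spirit of the patched kernel code; cmpxchg_u8 and the test in main are illustrations, not part of the kernel tree, and a plain "lock;" prefix stands in for the kernel's LOCK_PREFIX.

```c
#include <stdio.h>

/* Byte-sized compare-and-exchange: if *ptr == old, store new into *ptr.
 * Returns the value previously in *ptr (in %al, per the "=a"/"0" tie).
 * "r"(new) lets the compiler pick any GPR; %b1 then names its byte
 * subregister, which is valid for every GPR on x86-64.
 */
static inline unsigned char cmpxchg_u8(volatile unsigned char *ptr,
				       unsigned char old,
				       unsigned char new)
{
	unsigned char prev;
	__asm__ __volatile__("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*ptr), "0"(old)
			     : "memory");
	return prev;
}

int main(void)
{
	volatile unsigned char v = 1;
	unsigned char prev = cmpxchg_u8(&v, 1, 2);
	/* Expect prev=1 now=2: the exchange succeeds because v matched old. */
	printf("prev=%u now=%u\n", (unsigned)prev, (unsigned)v);
	return 0;
}
```

On i386 the same code could fail to assemble if the compiler happened to allocate "r"(new) to %esi or %edi, which have no byte subregisters there; that is why the inherited code used "q", and why dropping it is only correct (and purely cosmetic) for x86-64.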