tags 406409 +patch
thanks

The appended patch extends the configure check to cover mips64 and
mipsel, and improves the MIPS atomics implementation. Build-tested
on mips/unstable. A short, illustrative caller of the affected
primitives follows the patch.


Thiemo


--- bind9-9.4.0~rc1.0.old/configure.in  2006-11-10 18:30:39.000000000 +0000
+++ bind9-9.4.0~rc1.0/configure.in      2007-01-21 01:29:21.000000000 +0000
@@ -1944,7 +1944,7 @@ main() {
        powerpc-*)
                arch=powerpc
        ;;
-       mips-*)
+       mips*-*)
                arch=mips
        ;;
        ia64-*)
--- bind9-9.4.0~rc1.0.old/lib/isc/mips/include/isc/atomic.h     2005-07-09 08:14:00.000000000 +0100
+++ bind9-9.4.0~rc1.0/lib/isc/mips/include/isc/atomic.h 2007-01-21 00:26:16.000000000 +0000
@@ -31,18 +31,20 @@ static inline isc_int32_t
 isc_atomic_xadd(isc_int32_t *p, int val) {
        isc_int32_t orig;
 
-       /* add is a cheat, since MIPS has no mov instruction */
-       __asm__ volatile (
-           "1:"
-           "ll $3, %1\n"
-           "add %0, $0, $3\n"
-           "add $3, $3, %2\n"
-           "sc $3, %1\n"
-           "beq $3, 0, 1b"
-           : "=&r"(orig)
-           : "m"(*p), "r"(val)
-           : "memory", "$3"
-               );
+       __asm__ __volatile__ (
+       "       .set    push            \n"
+       "       .set    mips2           \n"
+       "       .set    noreorder       \n"
+       "       .set    noat            \n"
+       "1:     ll      $1, %1          \n"
+       "       addu    %0, $1, %2      \n"
+       "       sc      %0, %1          \n"
+       "       beqz    %0, 1b          \n"
+       "        move   %0, $1          \n"
+       "       .set    pop             \n"
+       : "=&r" (orig), "+R" (*p)
+       : "r" (val)
+       : "memory");
 
        return (orig);
 }
@@ -52,16 +54,7 @@ isc_atomic_xadd(isc_int32_t *p, int val)
  */
 static inline void
 isc_atomic_store(isc_int32_t *p, isc_int32_t val) {
-       __asm__ volatile (
-           "1:"
-           "ll $3, %0\n"
-           "add $3, $0, %1\n"
-           "sc $3, %0\n"
-           "beq $3, 0, 1b"
-           :
-           : "m"(*p), "r"(val)
-           : "memory", "$3"
-               );
+       *p = val;
 }
 
 /*
@@ -72,20 +65,22 @@ isc_atomic_store(isc_int32_t *p, isc_int
 static inline isc_int32_t
 isc_atomic_cmpxchg(isc_int32_t *p, int cmpval, int val) {
        isc_int32_t orig;
+       isc_int32_t tmp;
 
-       __asm__ volatile(
-           "1:"
-           "ll $3, %1\n"
-           "add %0, $0, $3\n"
-           "bne $3, %2, 2f\n"
-           "add $3, $0, %3\n"
-           "sc $3, %1\n"
-           "beq $3, 0, 1b\n"
-           "2:"
-           : "=&r"(orig)
-           : "m"(*p), "r"(cmpval), "r"(val)
-           : "memory", "$3"
-               );
+       __asm__ __volatile__ (
+       "       .set    push            \n"
+       "       .set    mips2           \n"
+       "       .set    noreorder       \n"
+       "       .set    noat            \n"
+       "1:     ll      $1, %1          \n"
+       "       bne     $1, %3, 2f      \n"
+       "        move   %2, %4          \n"
+       "       sc      %2, %1          \n"
+       "       beqz    %2, 1b          \n"
+       "2:      move   %0, $1          \n"
+       : "=&r"(orig), "+R" (*p), "=&r" (tmp)
+       : "r"(cmpval), "r"(val)
+       : "memory");
 
        return (orig);
 }
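
For reference, a minimal sketch of how the patched primitives are meant
to be used from C (hypothetical caller, not part of the patch; assumes
the BIND 9 build environment where <isc/atomic.h> and isc_int32_t are
available). isc_atomic_xadd() adds val and returns the previous value,
isc_atomic_cmpxchg() stores val only if *p equals cmpval and always
returns the previous value, and isc_atomic_store() is now a plain
assignment, presumably relying on aligned 32-bit stores being atomic
on MIPS:

#include <stdio.h>
#include <isc/atomic.h>         /* isc_atomic_xadd(), isc_atomic_cmpxchg() */

static isc_int32_t counter = 0; /* hypothetical shared counter */

int
main(void) {
        isc_int32_t prev;

        /* xadd returns the value held before the addition. */
        prev = isc_atomic_xadd(&counter, 1);
        printf("xadd: prev=%d now=%d\n", (int)prev, (int)counter);

        /*
         * cmpxchg stores 5 only if the counter still holds 1 and
         * returns the previous value either way; a return equal to
         * cmpval means the exchange took place.
         */
        prev = isc_atomic_cmpxchg(&counter, 1, 5);
        printf("cmpxchg: prev=%d now=%d\n", (int)prev, (int)counter);

        isc_atomic_store(&counter, 0);  /* reset via the plain store */
        return (0);
}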

