Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Richard Henderson

On Fri, Sep 08, 2000 at 04:58:33PM +0400, Ivan Kokshaysky wrote:
> Yes, I can reproduce this with gcc-2.95.2 (compiles cleanly with 2.96).
> Looks like older gcc doesn't like it when output operand 5 is listed
> also as an input. Hmm.
> Simply swapping operands 4 and 5 makes gcc happy.

I've got a patch to rearrange this whole set of code
into something a bit cleaner.  I'll see about forwarding
it shortly.


r~



Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Richard Henderson

On Fri, Sep 08, 2000 at 08:36:58PM +0400, Ivan Kokshaysky wrote:
> FWIW, here are __xchg_u8 and __xchg_u16 for Alpha.

I like it.  


r~



Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Ivan Kokshaysky

On Sat, Sep 09, 2000 at 12:50:38AM +1100, Anton Blanchard wrote:
> Yeah, on most architectures you can't do an xchg of a 16-bit quantity.
> Rusty has a patch:
> 
...
FWIW, here are __xchg_u8 and __xchg_u16 for Alpha.

Ivan.

--- 2.4.0t8p6/include/asm-alpha/system.h   Thu Sep  7 19:01:46 2000
+++ linux/include/asm-alpha/system.h   Fri Sep  8 20:12:47 2000
@@ -347,6 +347,56 @@ extern void __global_restore_flags(unsig
  */
 
 extern __inline__ unsigned long
+__xchg_u8(volatile char *m, unsigned long val)
+{
+   unsigned long tmp1, tmp2, addr64;
+
+   __asm__ __volatile__(
+   "   andnot  %4,7,%3\n"
+   "   insbl   %0,%4,%1\n"
+   "1: ldq_l   %2,0(%3)\n"
+   "   extbl   %2,%4,%0\n"
+   "   mskbl   %2,%4,%2\n"
+   "   or  %1,%2,%2\n"
+   "   stq_c   %2,0(%3)\n"
+   "   beq %2,2f\n"
+   "   mb\n"
+   ".subsection 2\n"
+   "2: br  1b\n"
+   ".previous"
+   : "=" (val), "=r" (tmp1), "=r" (tmp2), "=r" (addr64)
+   : "r" ((long)m), "0" (val)
+   : "memory");
+
+   return val;
+}
+
+extern __inline__ unsigned long
+__xchg_u16(volatile short *m, unsigned long val)
+{
+   unsigned long tmp1, tmp2, addr64;
+
+   __asm__ __volatile__(
+   "   andnot  %4,7,%3\n"
+   "   inswl   %0,%4,%1\n"
+   "1: ldq_l   %2,0(%3)\n"
+   "   extwl   %2,%4,%0\n"
+   "   mskwl   %2,%4,%2\n"
+   "   or  %1,%2,%2\n"
+   "   stq_c   %2,0(%3)\n"
+   "   beq %2,2f\n"
+   "   mb\n"
+   ".subsection 2\n"
+   "2: br  1b\n"
+   ".previous"
+   : "=" (val), "=r" (tmp1), "=r" (tmp2), "=r" (addr64)
+   : "r" ((long)m), "0" (val)
+   : "memory");
+
+   return val;
+}
+
+extern __inline__ unsigned long
 __xchg_u32(volatile int *m, unsigned long val)
 {
unsigned long dummy;
@@ -394,6 +444,10 @@ static __inline__ unsigned long
 __xchg(volatile void *ptr, unsigned long x, int size)
 {
switch (size) {
+   case 1:
+   return __xchg_u8(ptr, x);
+   case 2:
+   return __xchg_u16(ptr, x);
case 4:
return __xchg_u32(ptr, x);
case 8:
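
For anyone who doesn't read Alpha assembly, the byte exchange above boils
down to the following C sketch.  It is illustration only: the real thing
gets its atomicity from the ldq_l/stq_c pair, and store_conditional()
below is a hypothetical stand-in for stq_c, not an actual kernel helper.

        /* Rough C equivalent of __xchg_u8 above -- illustration only.
         * store_conditional() is a hypothetical stand-in for stq_c and,
         * as written here, is NOT atomic. */
        static inline int store_conditional(volatile unsigned long *q, unsigned long v)
        {
                *q = v;         /* the real stq_c fails if the lock flag was lost */
                return 1;
        }

        static inline unsigned long xchg_u8_sketch(volatile char *m, unsigned long val)
        {
                volatile unsigned long *q =
                        (volatile unsigned long *)((unsigned long)m & ~7UL); /* andnot %4,7,%3 */
                unsigned long shift = ((unsigned long)m & 7) * 8;
                unsigned long quad, old;

                do {
                        quad = *q;                                  /* ldq_l  */
                        old  = (quad >> shift) & 0xff;              /* extbl  */
                        quad = (quad & ~(0xffUL << shift))          /* mskbl  */
                             | ((val & 0xff) << shift);             /* insbl + or */
                } while (!store_conditional(q, quad));              /* stq_c, retry on failure */

                return old;
        }

__xchg_u16 is the same idea on a 16-bit lane (inswl/extwl/mskwl and a
0xffff mask).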



Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Christopher C. Chimelis


On Sat, 9 Sep 2000, Anton Blanchard wrote:

> Yeah, on most architectures you can't do an xchg of a 16-bit quantity.
> Rusty has a patch:

That's what I thought as well, at least for Alpha's case.  Thanks...will
try both patches and let you all know how it goes...

C





Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Anton Blanchard

 
> Great.  I'll apply the patch and see where the next breakage is :-P  I
> believe there was a problem in the netfilter code
> (net/ipv4/netfilter/ipt_REJECT.c, lines 67-68) with the selection of
> which xchg() to use (either __xchg_u32() or __xchg_u64(), as detailed in
> include/asm-alpha/system.h) since it's apparently trying to use
> __xchg_called_with_bad_pointer(), which is undefined on purpose.  So
> either something's not getting called properly or the detection is messed
> up (still have to look into it).

Yeah, on most architectures you can't do an xchg of a 16-bit quantity.
Rusty has a patch:

diff -ur -X /tmp/fileAYCCtF --minimal linux-2.4.0-test7-6/net/ipv4/netfilter/ipt_REJECT.c working-2.4.0-test7-6/net/ipv4/netfilter/ipt_REJECT.c
--- linux-2.4.0-test7-6/net/ipv4/netfilter/ipt_REJECT.c Tue Aug 22 17:28:14 2000
+++ working-2.4.0-test7-6/net/ipv4/netfilter/ipt_REJECT.c   Wed Aug 23 18:46:15 2000
@@ -27,6 +27,7 @@
struct tcphdr *otcph, *tcph;
struct rtable *rt;
unsigned int otcplen;
+   u_int16_t tmp;
int needs_ack;
 
/* IP header checks: fragment, too short. */
@@ -64,8 +65,11 @@
 
tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
 
+   /* Swap source and dest */
nskb->nh.iph->daddr = xchg(&nskb->nh.iph->saddr, nskb->nh.iph->daddr);
-   tcph->source = xchg(&tcph->dest, tcph->source);
+   tmp = tcph->source;
+   tcph->source = tcph->dest;
+   tcph->dest = tmp;
 
/* Truncate to length (no data) */
tcph->doff = sizeof(struct tcphdr)/4;




Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Christopher C. Chimelis


On Fri, 8 Sep 2000, Ivan Kokshaysky wrote:

> Yes, I can reproduce this with gcc-2.95.2 (compiles cleanly with 2.96).
> Looks like older gcc doesn't like it when output operand 5 is listed
> also as an input. Hmm.
> Simply swapping operands 4 and 5 makes gcc happy.

Great.  I'll apply the patch and see where the next breakage is :-P  I
believe there was a problem in the netfilter code
(net/ipv4/netfilter/ipt_REJECT.c, lines 67-68) with the selection of
which xchg() to use (either __xchg_u32() or __xchg_u64(), as detailed in
include/asm-alpha/system.h) since it's apparently trying to use
__xchg_called_with_bad_pointer(), which is undefined on purpose.  So
either something's not getting called properly or the detection is messed
up (still have to look into it).
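
(For reference, the mechanism is roughly the following -- a sketch of the
idea using the existing __xchg_u32()/__xchg_u64(), not the exact system.h
text.  __xchg() switches on sizeof(*ptr); any size without a case falls
through to a function that is declared but deliberately never defined, so
an unsupported xchg() turns into an undefined-reference error at link time
rather than silently doing the wrong thing.)

        /* Sketch of the bad-pointer trick -- not the exact kernel text. */
        extern unsigned long __xchg_called_with_bad_pointer(void); /* never defined anywhere */

        static inline unsigned long
        __xchg_sketch(volatile void *ptr, unsigned long x, int size)
        {
                switch (size) {
                case 4:
                        return __xchg_u32(ptr, x);
                case 8:
                        return __xchg_u64(ptr, x);
                }
                /* size 1 or 2 lands here -> undefined symbol at link time */
                return __xchg_called_with_bad_pointer();
        }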

C




Re: Compilation failure on Alpha with test8-pre[2-6]

2000-09-08 Thread Ivan Kokshaysky

On Fri, Sep 08, 2000 at 04:19:25AM -0400, Christopher C. Chimelis wrote:
> xor.c: In function `xor_block_alpha':
> xor.c:1791: inconsistent operand constraints in an `asm'
> xor.c: In function `xor_block_alpha_prefetch':
> xor.c:2213: inconsistent operand constraints in an `asm'
> 
Yes, I can reproduce this with gcc-2.95.2 (compiles cleanly with 2.96).
Looks like older gcc doesn't like it when output operand 5 is listed
also as an input. Hmm.
Simply swapping operands 4 and 5 makes gcc happy.
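
The construct in question, stripped to its shape, is a counter that
appears once as an output operand and again as an input tied back to it
with a matching digit constraint -- a read/write operand written as an
output/input pair.  A minimal, hypothetical sketch (not the xor.c code):

        /* Hypothetical minimal example of a tied (read/write) asm operand. */
        static inline long countdown(long count)
        {
                __asm__(
                "       subq    %0,1,%0"        /* Alpha: count = count - 1 */
                : "=r"(count)                   /* output operand 0 ...        */
                : "0"(count));                  /* ... fed back in as an input */
                return count;
        }

In xor.c the tied operand happens to be number 5 (`lines', with the
matching "5" in the input list); renumbering it to 4, as in the patch
below, is enough to keep gcc 2.95.2 happy.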

Ivan.

--- 2.4.0p8t5/drivers/block/xor.c   Wed Sep  6 11:57:49 2000
+++ linux/drivers/block/xor.c   Fri Sep  8 16:20:27 2000
@@ -1667,12 +1667,12 @@
ldq %1,8(%6)
ldq %2,16(%6)
ldq %3,24(%6)
-   ldq %4,32(%6)
+   ldq %5,32(%6)
ldq %0,%7(%0)
ldq %1,%7(%1)
ldq %2,%7(%2)
ldq %3,%7(%3)
-   ldq %4,%7(%4)
+   ldq %5,%7(%5)
.align 4
 5:
ldq $0,0(%0)
@@ -1680,13 +1680,13 @@
ldq $2,0(%2)
ldq $3,0(%3)
 
-   ldq $4,0(%4)
+   ldq $4,0(%5)
ldq $5,8(%0)
ldq $6,8(%1)
ldq $7,8(%2)
 
ldq $16,8(%3)
-   ldq $17,8(%4)
+   ldq $17,8(%5)
ldq $18,16(%0)
ldq $19,16(%1)
 
@@ -1695,7 +1695,7 @@
ldq $21,16(%3)
xor $2,$3,$3# 6 cycles from $3 load
 
-   ldq $0,16(%4)
+   ldq $0,16(%5)
xor $1,$3,$3
ldq $1,24(%0)
xor $3,$4,$4# 7 cycles from $4 load
@@ -1715,7 +1715,7 @@
ldq $4,24(%3)
xor $21,$0,$0   # 7 cycles from $0 load
 
-   ldq $5,24(%4)
+   ldq $5,24(%5)
xor $20,$0,$0
ldq $6,32(%0)
ldq $7,32(%1)
@@ -1727,7 +1727,7 @@

ldq $17,32(%3)
xor $2,$4,$4
-   ldq $18,32(%4)
+   ldq $18,32(%5)
ldq $19,40(%0)
 
ldq $20,40(%1)
@@ -1737,7 +1737,7 @@
 
stq $5,24(%0)
xor $6,$7,$7# 7 cycles from $7 load
-   ldq $1,40(%4)
+   ldq $1,40(%5)
ldq $2,48(%0)
 
ldq $3,48(%1)
@@ -1747,7 +1747,7 @@
 
ldq $5,48(%3)
xor $16,$18,$18
-   ldq $6,48(%4)
+   ldq $6,48(%5)
xor $19,$20,$20 # 7 cycles from $20 load
 
stq $18,32(%0)
@@ -1758,7 +1758,7 @@
ldq $16,56(%1)
ldq $17,56(%2)
ldq $18,56(%3)
-   ldq $19,56(%4)
+   ldq $19,56(%5)
 
xor $21,$1,$1
xor $2,$3,$3# 9 cycles from $3 load
@@ -1772,21 +1772,21 @@
 
stq $6,48(%0)
xor $16,$18,$18
-   subq %5,1,%5
+   subq %4,1,%4
xor $18,$19,$19 # 8 cycles from $19 load
 
stq $19,56(%0)
-   addq %4,64,%4
+   addq %5,64,%5
addq %3,64,%3
addq %2,64,%2
 
addq %1,64,%1
addq %0,64,%0
-   bgt %5,5b"
-   : "="(d), "="(s1), "="(s2), "="(s3), "=r"(s4), "=r"(lines)
+   bgt %4,5b"
+   : "="(d), "="(s1), "="(s2), "="(s3), "=r"(lines), "=r"(s4)
/* ARG! We've run out of asm arguments!  We've got to reload
   all those pointers we just loaded.  */
-   : "r"(bh_ptr), "i" (&((struct buffer_head *)0)->b_data), "5"(lines)
+   : "r"(bh_ptr), "i" (&((struct buffer_head *)0)->b_data), "4"(lines)
: "memory", "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
  "$16", "$17", "$18", "$19", "$20", "$21");
return;
@@ -2084,12 +2084,12 @@
ldq %1,8(%6)
ldq %2,16(%6)
ldq %3,24(%6)
-   ldq %4,32(%6)
+   ldq %5,32(%6)
ldq %0,%7(%0)
ldq %1,%7(%1)
ldq %2,%7(%2)
ldq %3,%7(%3)
-   ldq %4,%7(%4)
+   ldq %5,%7(%5)
.align 4
 5:
ldq $0,0(%0)
@@ -2097,13 +2097,13 @@
ldq $2,0(%2)
ldq $3,0(%3)
 
-   ldq $4,0(%4)
+   ldq $4,0(%5)
ldq $5,8(%0)
ldq $6,8(%1)
ldq $7,8(%2)
 
ldq $16,8(%3)
-   ldq $17,8(%4)
+   ldq $17,8(%5)
ldq $18,16(%0)
ldq $19,16(%1)
 
@@ -2112,7 +2112,7 @@
ldq $21,16(%3)
xor $2,$3,$3# 6 cycles from $3 load
 
-   ldq $0,16(%4)
+   ldq $0,16(%5)
xor $1,$3,$3
ldq $1,24(%0)
xor $3,$4,$4# 7 cycles from $4 load
@@ -2132,7 +2132,7 @@
ldq $4,24(%3)
xor $21,$0,$0   # 7 cycles from $0 load
 
-   ldq $5,24(%4)
+   ldq $5,24(%5)
xor $20,$0,$0
ldq $6,32(%0)
ldq $7,32(%1)
@@ -2144,7 +2144,7 @@

ldq $17,32(%3)
xor $2,$4,$4
-   ldq $18,32(%4)
+   ldq $18,32(%5)
ldq $19,40(%0)
 
ldq $20,40(%1)
@@ -2154,7 +2154,7 @@
 
stq $5,24(%0)
xor $6,$7,$7# 7 cycles from $7 load
-   ldq $1,40(%4)
+   ldq $1,40(%5)
ldq $2,48(%0)
 
ldq $3,48(%1)
@@ -2164,7 +2164,7 @@
 
ldq $5,48(%3)
xor $16,$18,$18
-   ldq $6,48(%4)
+   ldq $6,48(%5)
xor $19,$20,$20 # 7 cycles from $20 load
 
stq $18,32(%0)
@@ -2175,7 +2175,7 @@
