[PATCH] i386 rw_semaphores, general abstraction patch

2001-04-12 Thread David Howells

This patch (made against linux-2.4.4-pre2) takes Anton Blanchard's suggestions
and abstracts out the rwsem implementation somewhat. This makes the following
general files:

include/linux/rwsem.h   - general RW semaphore wrapper
include/linux/rwsem-spinlock.h  - rwsem inlines implemented in C
include/asm-xxx/rwsem.h - arch specific rwsem details
lib/rwsem.c - contention handlers for rwsems

The asm/rwsem.h file is responsible for specifying whether the general
default implementation should be used, or supplying an alternative optimised
implementation if not.

For the i386 arch, I've supplied the following:

include/asm-i386/rwsem.h- i386 specific rwsem details
include/asm-i386/rwsem-xadd.h   - i386 XADD optimised rwsems
include/asm-i386/rwsem-spin.h   - i386 optimised spinlocked rwsems
arch/i386/lib/rwsem.S   - i386 call wrapper stubs

I haven't changed any of the other arch's, but until their semaphore.h's refer
to linux/rwsem.h, they'll continue to use whatever method they currently
happen to use. Note that the contention handling functions have been renamed
to prevent name clashes with the old contention functions.

David



diff -uNr linux-2.4.4-pre2/arch/i386/kernel/i386_ksyms.c 
linux/arch/i386/kernel/i386_ksyms.c
--- linux-2.4.4-pre2/arch/i386/kernel/i386_ksyms.c  Thu Apr 12 08:57:23 2001
+++ linux/arch/i386/kernel/i386_ksyms.c Thu Apr 12 15:55:35 2001
@@ -80,8 +80,8 @@
 EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
 EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
-EXPORT_SYMBOL_NOVERS(__down_write_failed);
-EXPORT_SYMBOL_NOVERS(__down_read_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_down_write_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_down_read_failed);
 EXPORT_SYMBOL_NOVERS(__rwsem_wake);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff -uNr linux-2.4.4-pre2/arch/i386/kernel/semaphore.c 
linux/arch/i386/kernel/semaphore.c
--- linux-2.4.4-pre2/arch/i386/kernel/semaphore.c   Thu Apr 12 08:57:23 2001
+++ linux/arch/i386/kernel/semaphore.c  Thu Apr 12 10:10:34 2001
@@ -233,291 +233,6 @@
"ret"
 );
 
-asm(
-"
-.text
-.align 4
-.globl __down_read_failed
-__down_read_failed:
-   pushl   %edx
-   pushl   %ecx
-   call  down_read_failed
-   popl%ecx
-   popl%edx
-   ret
-"
-);
-
-asm(
-"
-.text
-.align 4
-.globl __down_write_failed
-__down_write_failed:
-   pushl   %edx
-   pushl   %ecx
-   call  down_write_failed
-   popl%ecx
-   popl%edx
-   ret
-"
-);
-
-asm(
-"
-.text
-.align 4
-.globl __rwsem_wake
-__rwsem_wake:
-   pushl   %edx
-   pushl   %ecx
-   call  rwsem_wake
-   popl%ecx
-   popl%edx
-   ret
-"
-);
-
-struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *sem));
-struct rw_semaphore *FASTCALL(down_read_failed(struct rw_semaphore *sem));
-struct rw_semaphore *FASTCALL(down_write_failed(struct rw_semaphore *sem));
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-   int tmp = delta;
-
-#ifndef CONFIG_USING_SPINLOCK_BASED_RWSEM
-   __asm__ __volatile__(
-   LOCK_PREFIX "xadd %0,(%1)"
-   : "+r"(tmp)
-   : "r"(sem)
-   : "memory");
-
-#else
-
-   __asm__ __volatile__(
-   "# beginning rwsem_atomic_update\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX"  decb  "RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* try to grab the spinlock */
-   "  js    3f\n" /* jump if failed */
-   "1:\n\t"
-#endif
-   "  xchgl %0,(%1)\n\t" /* retrieve the old value */
-   "  addl  %0,(%1)\n\t" /* add 0x0001, result in memory */
-#ifdef CONFIG_SMP
-   "  movb  $1,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* release the spinlock */
-#endif
-   ".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-   "3:\n\t" /* spin on the spinlock till we get it */
-   "  cmpb  $0,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t"
-   "  rep;nop   \n\t"
-   "  jle   3b\n\t"
-   "  jmp   1b\n"
-#endif
-   ".previous\n"
-   "# ending rwsem_atomic_update\n\t"
-   : "+r"(tmp)
-   : "r"(sem)
-   : "memory");
-
-#endif
-   return tmp+delta;
-}
-
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
-#ifndef CONFIG_USING_SPINLOCK_BASED_RWSEM
-   return cmpxchg((__u16*)&sem->count.counter,0,RWSEM_ACTIVE_BIAS);
-
-#else
-   __u16 prev;
-
-   __asm__ __volatile__(
-   "# beginning rwsem_cmpxchgw\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX"  decb  

[PATCH] i386 rw_semaphores, general abstraction patch

2001-04-12 Thread David Howells

This patch (made against linux-2.4.4-pre2) takes Anton Blanchard's suggestions
and abstracts out the rwsem implementation somewhat. This makes the following
general files:

include/linux/rwsem.h   - general RW semaphore wrapper
include/linux/rwsem-spinlock.h  - rwsem inlines implemented in C
include/asm-xxx/rwsem.h - arch specific rwsem details
lib/rwsem.c - contention handlers for rwsems

The asm/rwsem.h file is responsible for specifying whether the general
default implementation should be used, or supplying an alternative optimised
implementation if not.

For the i386 arch, I've supplied the following:

include/asm-i386/rwsem.h- i386 specific rwsem details
include/asm-i386/rwsem-xadd.h   - i386 XADD optimised rwsems
include/asm-i386/rwsem-spin.h   - i386 optimised spinlocked rwsems
arch/i386/lib/rwsem.S   - i386 call wrapper stubs

I haven't changed any of the other arch's, but until their semaphore.h's refer
to linux/rwsem.h, they'll continue to use whatever method they currently
happen to use. Note that the contention handling functions have been renamed
to prevent name clashes with the old contention functions.

David



diff -uNr linux-2.4.4-pre2/arch/i386/kernel/i386_ksyms.c 
linux/arch/i386/kernel/i386_ksyms.c
--- linux-2.4.4-pre2/arch/i386/kernel/i386_ksyms.c  Thu Apr 12 08:57:23 2001
+++ linux/arch/i386/kernel/i386_ksyms.c Thu Apr 12 15:55:35 2001
@@ -80,8 +80,8 @@
 EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
 EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
-EXPORT_SYMBOL_NOVERS(__down_write_failed);
-EXPORT_SYMBOL_NOVERS(__down_read_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_down_write_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_down_read_failed);
 EXPORT_SYMBOL_NOVERS(__rwsem_wake);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff -uNr linux-2.4.4-pre2/arch/i386/kernel/semaphore.c 
linux/arch/i386/kernel/semaphore.c
--- linux-2.4.4-pre2/arch/i386/kernel/semaphore.c   Thu Apr 12 08:57:23 2001
+++ linux/arch/i386/kernel/semaphore.c  Thu Apr 12 10:10:34 2001
@@ -233,291 +233,6 @@
"ret"
 );
 
-asm(
-"
-.text
-.align 4
-.globl __down_read_failed
-__down_read_failed:
-   pushl   %edx
-   pushl   %ecx
-   call  down_read_failed
-   popl%ecx
-   popl%edx
-   ret
-"
-);
-
-asm(
-"
-.text
-.align 4
-.globl __down_write_failed
-__down_write_failed:
-   pushl   %edx
-   pushl   %ecx
-   call  down_write_failed
-   popl%ecx
-   popl%edx
-   ret
-"
-);
-
-asm(
-"
-.text
-.align 4
-.globl __rwsem_wake
-__rwsem_wake:
-   pushl   %edx
-   pushl   %ecx
-   call  rwsem_wake
-   popl%ecx
-   popl%edx
-   ret
-"
-);
-
-struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *sem));
-struct rw_semaphore *FASTCALL(down_read_failed(struct rw_semaphore *sem));
-struct rw_semaphore *FASTCALL(down_write_failed(struct rw_semaphore *sem));
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-   int tmp = delta;
-
-#ifndef CONFIG_USING_SPINLOCK_BASED_RWSEM
-   __asm__ __volatile__(
-   LOCK_PREFIX "xadd %0,(%1)"
-   : "+r"(tmp)
-   : "r"(sem)
-   : "memory");
-
-#else
-
-   __asm__ __volatile__(
-   "# beginning rwsem_atomic_update\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX"  decb  "RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* try to grab the spinlock */
-   "  js    3f\n" /* jump if failed */
-   "1:\n\t"
-#endif
-   "  xchgl %0,(%1)\n\t" /* retrieve the old value */
-   "  addl  %0,(%1)\n\t" /* add 0x0001, result in memory */
-#ifdef CONFIG_SMP
-   "  movb  $1,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* release the spinlock */
-#endif
-   ".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-   "3:\n\t" /* spin on the spinlock till we get it */
-   "  cmpb  $0,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t"
-   "  rep;nop   \n\t"
-   "  jle   3b\n\t"
-   "  jmp   1b\n"
-#endif
-   ".previous\n"
-   "# ending rwsem_atomic_update\n\t"
-   : "+r"(tmp)
-   : "r"(sem)
-   : "memory");
-
-#endif
-   return tmp+delta;
-}
-
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
-#ifndef CONFIG_USING_SPINLOCK_BASED_RWSEM
-   return cmpxchg((__u16*)&sem->count.counter,0,RWSEM_ACTIVE_BIAS);
-
-#else
-   __u16 prev;
-
-   __asm__ __volatile__(
-   "# beginning rwsem_cmpxchgw\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX"  decb