Commit-ID:  71c01930b42e5dd65d4820dea116bcbe95a0b768
Gitweb:     http://git.kernel.org/tip/71c01930b42e5dd65d4820dea116bcbe95a0b768
Author:     Borislav Petkov <[email protected]>
AuthorDate: Wed, 27 Apr 2016 13:47:32 +0200
Committer:  Ingo Molnar <[email protected]>
CommitDate: Thu, 28 Apr 2016 10:42:56 +0200

locking/rwsem, x86: Clean up ____down_write()

Move the RWSEM_ACTIVE_WRITE_BIAS out of the inline asm to reduce the
number of arguments. Also, make it an input argument only (why it was an
output operand, I still don't know...).

For better readability, use symbolic names for the arguments and move
the line-continuation backslashes to 80 cols.

Resulting asm differs only in the temporary GCC variable names and
locations:

  -- before      2016-04-27 13:39:05.320778458 +0200
  ++ after       2016-04-27 13:52:37.336778994 +0200
  @@ -11,8 +11,8 @@ down_write_killable:
   .LBB84:
   .LBB85:
   .LBB86:
  -        .loc 2 128 0
  -        movabsq $-4294967295, %rdx      #, tmp
  +        .loc 2 130 0
  +        movabsq $-4294967295, %rdx      #, tmp94
           movq    %rdi, %rax      # sem, sem
   .LBE86:
   .LBE85:
  @@ -23,17 +23,17 @@ down_write_killable:
   .LBB89:
   .LBB88:
   .LBB87:
  -        .loc 2 128 0
  +        .loc 2 130 0
   #APP
  -# 128 "./arch/x86/include/asm/rwsem.h" 1
  +# 130 "./arch/x86/include/asm/rwsem.h" 1
           # beginning down_write
           .pushsection .smp_locks,"a"
   .balign 4
   .long 671f - .
   .popsection
   671:
  -        lock;   xadd      %rdx,(%rax)   # tmp, sem
  -          test  %edx , %edx     # tmp
  +        lock;   xadd      %rdx,(%rax)   # tmp94, sem
  +          test  %edx , %edx     # tmp94
             jz        1f
     call call_rwsem_down_write_failed_killable
   1:

Signed-off-by: Borislav Petkov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Chris Zankel <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Max Filippov <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 arch/x86/include/asm/rwsem.h | 36 +++++++++++++++++++-----------------
 1 file changed, 19 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 453744c..d2f8d10 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -99,23 +99,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-#define ____down_write(sem, slow_path)                 \
-({                                                     \
-       long tmp;                                       \
-       struct rw_semaphore* ret;                       \
-       asm volatile("# beginning down_write\n\t"       \
-                    LOCK_PREFIX "  xadd      %1,(%3)\n\t"      \
-                    /* adds 0xffff0001, returns the old value */ \
-                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
-                    /* was the active mask 0 before? */\
-                    "  jz        1f\n"                 \
-                    "  call " slow_path "\n"           \
-                    "1:\n"                             \
-                    "# ending down_write"              \
-                    : "+m" (sem->count), "=d" (tmp), "=a" (ret)        \
-                    : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
-                    : "memory", "cc");                 \
-       ret;                                            \
+#define ____down_write(sem, slow_path)                                         \
+({                                                                             \
+       long tmp = RWSEM_ACTIVE_WRITE_BIAS;                                     \
+       struct rw_semaphore* ret;                                               \
+                                                                               \
+       asm volatile("# beginning down_write\n\t"                               \
+                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"              \
+                    /* adds 0xffff0001, returns the old value */               \
+                    "  test " __ASM_SEL(%w[tmp],%k[tmp]) ","                   \
+                              __ASM_SEL(%w[tmp],%k[tmp]) "\n\t"                \
+                    /* was the active mask 0 before? */                        \
+                    "  jz        1f\n"                                         \
+                    "  call " slow_path "\n"                                   \
+                    "1:\n"                                                     \
+                    "# ending down_write"                                      \
+                    : "+m" (sem->count), "=a" (ret)                            \
+                    : [sem] "a" (sem), [tmp] "r" (tmp)                         \
+                    : "memory", "cc");                                         \
+       ret;                                                                    \
 })
 
 static inline void __down_write(struct rw_semaphore *sem)

Reply via email to