Commit-ID:  1edfbb4153bd29bcf8d2236676238d5237972be1
Gitweb:     http://git.kernel.org/tip/1edfbb4153bd29bcf8d2236676238d5237972be1
Author:     Jan Beulich <jbeul...@suse.com>
AuthorDate: Mon, 10 Sep 2012 12:04:16 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Thu, 13 Sep 2012 17:43:58 +0200

x86/64: Adjust types of temporaries used by ffs()/fls()/fls64()

The 64-bit special cases of the former two (the third one is
64-bit only anyway) don't need to use "long" temporaries, as the
result always fits in a 32-bit variable and the functions
return plain "int". This avoids a few REX prefixes, i.e. it
minimally reduces code size.
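
For illustration only (not part of the patch): a minimal user-space
sketch with hypothetical helpers my_ffs()/my_fls64(), assuming GCC-style
inline asm on x86-64. With an "int" destination the compiler preloads
the register with a 32-bit move (no REX.W prefix), and the "%q0"
operand modifier prints the 64-bit register name so BSRQ still gets a
matching 64-bit destination.

#include <stdio.h>

static inline int my_ffs(int x)
{
        int r;

        /*
         * On 64-bit CPUs, BSF leaves the destination unchanged when the
         * source is zero (the behaviour the kernel relies on), so
         * preloading the destination with -1 makes "r + 1" return 0.
         * Because r is an "int", the preload is a 32-bit move with no
         * REX.W prefix.
         */
        asm("bsfl %1,%0"
            : "=r" (r)
            : "rm" (x), "0" (-1));
        return r + 1;
}

static inline int my_fls64(unsigned long long x)
{
        int bitpos = -1;

        /*
         * bitpos is an "int", but BSRQ needs a 64-bit destination, so
         * the "%q0" modifier makes GCC print the 64-bit register name
         * (e.g. %rax rather than %eax).  If x is zero, BSRQ leaves the
         * register unchanged and the function returns 0.
         */
        asm("bsrq %1,%q0"
            : "+r" (bitpos)
            : "rm" (x));
        return bitpos + 1;
}

int main(void)
{
        printf("my_ffs(0x10) = %d\n", my_ffs(0x10));   /* 5 */
        printf("my_ffs(0)    = %d\n", my_ffs(0));      /* 0 */
        printf("my_fls64(1ULL << 63) = %d\n",
               my_fls64(1ULL << 63));                  /* 64 */
        return 0;
}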

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Link: http://lkml.kernel.org/r/504de550020000780009a...@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/bitops.h |   10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 72f5009..ebaee69 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -417,10 +417,9 @@ static inline int ffs(int x)
         * We cannot do this on 32 bits because at the very least some
         * 486 CPUs did not behave this way.
         */
-       long tmp = -1;
        asm("bsfl %1,%0"
            : "=r" (r)
-           : "rm" (x), "0" (tmp));
+           : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
        asm("bsfl %1,%0\n\t"
            "cmovzl %2,%0"
@@ -459,10 +458,9 @@ static inline int fls(int x)
         * We cannot do this on 32 bits because at the very least some
         * 486 CPUs did not behave this way.
         */
-       long tmp = -1;
        asm("bsrl %1,%0"
            : "=r" (r)
-           : "rm" (x), "0" (tmp));
+           : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
@@ -490,13 +488,13 @@ static inline int fls(int x)
 #ifdef CONFIG_X86_64
 static __always_inline int fls64(__u64 x)
 {
-       long bitpos = -1;
+       int bitpos = -1;
        /*
         * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
         * dest reg is undefined if x==0, but their CPU architect says its
         * value is written to set it to the same as before.
         */
-       asm("bsrq %1,%0"
+       asm("bsrq %1,%q0"
            : "+r" (bitpos)
            : "rm" (x));
        return bitpos + 1;
--