On Sun, 2015-04-05 at 09:23 +0200, Nicholas Mc Guire wrote:
> The majority of the msecs_to_jiffies() users in the kernel are passing in
> constants which would allow gcc to do constant folding by checking with
> __builtin_constant_p() in msecs_to_jiffies().
> 
> The original msecs_to_jiffies is renamed to __msecs_to_jiffies and aside
> from the removal of the check for negative values being moved out, is
> unaltered.

At least for gcc 4.9, this doesn't allow the compiler
to optimize / precalculate msecs_to_jiffies calls
with a constant.

This does: (on top of your patch x86-64 defconfig)

$ size vmlinux.o.*
   text    data     bss     dec     hex filename
11770523        1505971 1018454 14294948         da1fa4 
vmlinux.o.next-b0a12fb5bc8
11770530        1505971 1018454 14294955         da1fab 
vmlinux.o.next-b0a12fb5bc8-inline
11768734        1505971 1018454 14293159         da18a7 
vmlinux.o.next-b0a12fb5bc8-macro

I think this should still move the if ((int)m < 0) check back into the
original __msecs_to_jiffies function.

---

 include/linux/jiffies.h | 71 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 46 insertions(+), 25 deletions(-)

diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index a75158e..f8fe9f7 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -291,6 +291,39 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
 
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+#define const_msecs_to_jiffies(m)                                      \
+({                                                                     \
+       unsigned long j;                                                \
+       if ((int)m < 0)                                                 \
+               j = MAX_JIFFY_OFFSET;                                   \
+       else                                                            \
+               j = (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); \
+       j;                                                              \
+})
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+#define const_msecs_to_jiffies(m)                                      \
+({                                                                     \
+       unsigned long j;                                                \
+       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))                     \
+               j = MAX_JIFFY_OFFSET;                                   \
+       else                                                            \
+               j = m * (HZ / MSEC_PER_SEC);                            \
+       j;                                                              \
+})
+#else
+#define const_msecs_to_jiffies(m)                                      \
+({                                                                     \
+       unsigned long j;                                                \
+       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))\
+               j = MAX_JIFFY_OFFSET;                                   \
+       else                                                            \
+               j = (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)           \
+                       >> MSEC_TO_HZ_SHR32;                            \
+       j;                                                              \
+})
+#endif
+
 /**
  * msecs_to_jiffies: - convert milliseconds to jiffies
  * @m: time in millisecons 
@@ -313,31 +346,19 @@ extern unsigned long __msecs_to_jiffies(const unsigned 
int m);
  * allow constant folding and the actual conversion must be done at
  * runtime.
  */
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
-{
-       /*
-        * Negative value, means infinite timeout:
-        */
-       if ((int)m < 0)
-               return MAX_JIFFY_OFFSET;
-
-       if (__builtin_constant_p(m)) {
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
-               return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
-               if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-                       return MAX_JIFFY_OFFSET;
-               return m * (HZ / MSEC_PER_SEC);
-#else
-               if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-                       return MAX_JIFFY_OFFSET;
-
-               return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
-                       >> MSEC_TO_HZ_SHR32;
-#endif
-       } else
-               return __msecs_to_jiffies(m);
-}
+#define msecs_to_jiffies(m)                                            \
+({                                                                     \
+       unsigned long j;                                                \
+       if (__builtin_constant_p(m)) {                                  \
+               if ((int)m < 0)                                         \
+                       j = MAX_JIFFY_OFFSET;                           \
+               else                                                    \
+                       j = const_msecs_to_jiffies(m);                  \
+       } else {                                                        \
+               j = __msecs_to_jiffies(m);                              \
+       }                                                               \
+       j;                                                              \
+})
 
 extern unsigned long usecs_to_jiffies(const unsigned int u);
 extern unsigned long timespec_to_jiffies(const struct timespec *value);


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to