This is going to be used by irqtime accounting. The scheduler accesses
irqtime from the fast path, where preemption is already disabled.

Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carst...@de.ibm.com>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Martin Schwidefsky <schwidef...@de.ibm.com>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Wu Fengguang <fengguang...@intel.com>
Signed-off-by: Frederic Weisbecker <fweis...@gmail.com>
---
 include/linux/u64_stats_sync.h | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)

diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 4b4439e..20c26dc 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -88,29 +88,40 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 #endif
 }
 
+/* Preempt-unsafe version of u64_stats_fetch_begin */
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+       return read_seqcount_begin(&syncp->seq);
+#endif
+       return 0;
+}
+
+/* Preempt-unsafe version of u64_stats_fetch_retry */
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+                                        unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+       return read_seqcount_retry(&syncp->seq, start);
+#endif
+       return false;
+}
+
 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-       return read_seqcount_begin(&syncp->seq);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
        preempt_disable();
 #endif
-       return 0;
-#endif
+       return __u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                         unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-       return read_seqcount_retry(&syncp->seq, start);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
        preempt_enable();
 #endif
-       return false;
-#endif
+       return __u64_stats_fetch_retry(syncp, start);
 }
 
 /*
-- 
2.1.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to