Expedite synchronize_rcu during the SMT mode switch operation when
initiated via /sys/devices/system/cpu/smt/control interface

SMT mode switch operations, i.e. switching between SMT 8 and SMT 1 or
vice versa, are user-driven and should therefore complete as soon as
possible. Switching SMT states involves iterating over a list of CPUs
and performing hotplug operations on each. These transitions were found
to take a significantly large amount of time to complete, particularly
on high-core-count systems.

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Vishal Chourasia <[email protected]>
---
 include/linux/rcupdate.h | 8 ++++++++
 kernel/cpu.c             | 4 ++++
 kernel/rcu/rcu.h         | 4 ----
 3 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7729fef249e1..61b80c29d53b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1190,6 +1190,14 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
 extern int rcu_expedited;
 extern int rcu_normal;
 
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_expedite_gp(void) { }
+static inline void rcu_unexpedite_gp(void) { }
+#else
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+#endif
+
 DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
 DECLARE_LOCK_GUARD_0_ATTRS(rcu, __acquires_shared(RCU), __releases_shared(RCU))
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 62e209eda78c..1377a68d6f47 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2682,6 +2682,7 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
                ret = -EBUSY;
                goto out;
        }
+       rcu_expedite_gp();
        /* Hold cpus_write_lock() for entire batch operation. */
        cpus_write_lock();
        for_each_online_cpu(cpu) {
@@ -2714,6 +2715,7 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
        if (!ret)
                cpu_smt_control = ctrlval;
        cpus_write_unlock();
+       rcu_unexpedite_gp();
        arch_smt_update();
 out:
        cpu_maps_update_done();
@@ -2733,6 +2735,7 @@ int cpuhp_smt_enable(void)
        int cpu, ret = 0;
 
        cpu_maps_update_begin();
+       rcu_expedite_gp();
        /* Hold cpus_write_lock() for entire batch operation. */
        cpus_write_lock();
        cpu_smt_control = CPU_SMT_ENABLED;
@@ -2749,6 +2752,7 @@ int cpuhp_smt_enable(void)
                cpuhp_online_cpu_device(cpu);
        }
        cpus_write_unlock();
+       rcu_unexpedite_gp();
        arch_smt_update();
        cpu_maps_update_done();
        return ret;
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index dc5d614b372c..41a0d262e964 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -512,8 +512,6 @@ do {                                                        \
 static inline bool rcu_gp_is_normal(void) { return true; }
 static inline bool rcu_gp_is_expedited(void) { return false; }
 static inline bool rcu_async_should_hurry(void) { return false; }
-static inline void rcu_expedite_gp(void) { }
-static inline void rcu_unexpedite_gp(void) { }
 static inline void rcu_async_hurry(void) { }
 static inline void rcu_async_relax(void) { }
 static inline bool rcu_cpu_online(int cpu) { return true; }
@@ -521,8 +519,6 @@ static inline bool rcu_cpu_online(int cpu) { return true; }
 bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
 bool rcu_async_should_hurry(void);  /* Internal RCU use. */
-void rcu_expedite_gp(void);
-void rcu_unexpedite_gp(void);
 void rcu_async_hurry(void);
 void rcu_async_relax(void);
 void rcupdate_announce_bootup_oddness(void);
-- 
2.53.0


Reply via email to