Could you please fold this into the 6/7 patch?

It reverts a stray chunk (the 32768 debug value left in
sysctl_sched_rt_ratio; 62259 is ~95% of 65536, matching the comment
above it), but more importantly it fixes compilation with
!FAIR_GROUP_SCHED.

Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
---
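
For reference, a condensed sketch of the #ifdef nesting kernel/sched.c
is left with after this patch (function bodies elided; the SMP-side
sched_rt_period_init_tg() wrapper is implied by the surrounding context
rather than visible in the hunks below):

#ifdef CONFIG_SMP
/* per-cpu, hotplug aware */
static void __init sched_rt_period_init(void);

#ifdef CONFIG_FAIR_GROUP_SCHED
/* SMP flavour: run the per-task-group helpers on each cpu */
static void __sched_rt_period_init_tg(void *arg);
static void sched_rt_period_init_tg(struct task_group *tg);
static void __sched_rt_period_destroy_tg(void *arg);
static void sched_rt_period_destroy_tg(struct task_group *tg);
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_SMP */
/* UP: cpu 0 only */
static void __init sched_rt_period_init(void);

#ifdef CONFIG_FAIR_GROUP_SCHED
/* UP flavour: operate on tg->rt_rq[0] directly */
static void sched_rt_period_init_tg(struct task_group *tg);
static void sched_rt_period_destroy_tg(struct task_group *tg);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */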
 kernel/sched.c |   10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -647,7 +647,7 @@ const_debug unsigned int sysctl_sched_rt
  * ratio of time -rt tasks may consume.
  * default: 95%
  */
-const_debug unsigned int sysctl_sched_rt_ratio = 32768; //62259;
+const_debug unsigned int sysctl_sched_rt_ratio = 62259;
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -5379,6 +5379,7 @@ static void __init sched_rt_period_init(
        hotcpu_notifier(sched_rt_period_hotplug, 0);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 static void __sched_rt_period_init_tg(void *arg)
 {
        struct task_group *tg = arg;
@@ -5404,12 +5405,14 @@ static void sched_rt_period_destroy_tg(s
 {
        on_each_cpu(__sched_rt_period_destroy_tg, tg, 0, 1);
 }
-#else
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+#else /* CONFIG_SMP */
 static void __init sched_rt_period_init(void)
 {
        sched_rt_period_start_cpu(0);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 static void sched_rt_period_init_tg(struct task_group *tg)
 {
        sched_rt_period_start(tg->rt_rq[0]);
@@ -5419,7 +5422,8 @@ static void sched_rt_period_destroy_tg(s
 {
        sched_rt_period_stop(tg->rt_rq[0]);
 }
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SMP
 /*

