Re: [RFC PATCH 03/10] sched: Forced task migration on heterogeneous systems

2012-10-04 Thread Viresh Kumar
Minor comments here :)

On 22 September 2012 00:02,  morten.rasmus...@arm.com wrote:

 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
 index d80de46..490f1f0 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -3744,7 +3744,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
  * 1) task is cache cold, or
  * 2) too many balance attempts have failed.
  */
 -

:(

 tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
 if (!tsk_cache_hot ||
 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 @@ -5516,6 +5515,199 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 return 0;
  }


 +static int hmp_can_migrate_task(struct task_struct *p, struct lb_env *env)
 +{

...

 +static int move_specific_task(struct lb_env *env, struct task_struct *pm)
 +{
 +   struct task_struct *p, *n;
 +
 +   list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
 +   if (throttled_lb_pair(task_group(p), env->src_rq->cpu,
 +   env->dst_cpu))
 +   continue;

Please fix the indentation of the above if statement.
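For example, with the continuation line aligned to the open parenthesis as the
coding style suggests (just illustrating what I mean):

        if (throttled_lb_pair(task_group(p), env->src_rq->cpu,
                              env->dst_cpu))
                continue;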

...

 +#else
 +static void hmp_force_up_migration(int this_cpu) { }

inline?
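That is, something along these lines:

        static inline void hmp_force_up_migration(int this_cpu) { }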

--
viresh



[RFC PATCH 03/10] sched: Forced task migration on heterogeneous systems

2012-09-21 Thread morten.rasmussen
From: Morten Rasmussen morten.rasmus...@arm.com

This patch introduces forced task migration for moving suitable
currently running tasks between hmp_domains. Task behaviour is likely
to change over time. Tasks running in a less capable hmp_domain may
change to become more demanding and should therefore be migrated up.
They are unlikely to go through the select_task_rq_fair() path anytime
soon and therefore need special attention.

This patch introduces a periodic check (SCHED_TICK) of the currently
running task on all runqueues and sets up a forced migration using
stop_machine_no_wait() if the task needs to be migrated.
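In rough terms, the polling boils down to something like the sketch below
(heavily simplified, not the exact patch code; hmp_up_migration() and the
rq->push_cpu selection are assumed from elsewhere in this series):

static void hmp_force_up_migration(int this_cpu)
{
        int cpu;

        /* Poll the currently running task on every online runqueue. */
        for_each_online_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);
                unsigned long flags;
                int force = 0;

                raw_spin_lock_irqsave(&rq->lock, flags);
                /* Would the running task be better off on a faster CPU? */
                if (!rq->active_balance &&
                    hmp_up_migration(cpu, &rq->curr->se)) {
                        rq->active_balance = 1;
                        rq->migrate_task = rq->curr;
                        /* rq->push_cpu selection omitted in this sketch */
                        force = 1;
                }
                raw_spin_unlock_irqrestore(&rq->lock, flags);

                /* Hand the actual migration over to the cpu stopper. */
                if (force)
                        stop_one_cpu_nowait(cpu,
                                        hmp_active_task_migration_cpu_stop,
                                        rq, &rq->active_balance_work);
        }
}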

Ideally, this should not be implemented by polling all runqueues.

Signed-off-by: Morten Rasmussen morten.rasmus...@arm.com
---
 kernel/sched/fair.c  |  196 +-
 kernel/sched/sched.h |    3 +
 2 files changed, 198 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d80de46..490f1f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3744,7 +3744,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 * 1) task is cache cold, or
 * 2) too many balance attempts have failed.
 */
-
tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
if (!tsk_cache_hot ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
@@ -5516,6 +5515,199 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
return 0;
 }
 
+/*
+ * hmp_can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
+ * Ideally this function should be merged with can_migrate_task() to avoid
+ * redundant code.
+ */
+static int hmp_can_migrate_task(struct task_struct *p, struct lb_env *env)
+{
+   int tsk_cache_hot = 0;
+
+   /*
+* We do not migrate tasks that are:
+* 1) running (obviously), or
+* 2) cannot be migrated to this CPU due to cpus_allowed
+*/
+   if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
+   schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+   return 0;
+   }
+   env->flags &= ~LBF_ALL_PINNED;
+
+   if (task_running(env->src_rq, p)) {
+   schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+   return 0;
+   }
+
+   /*
+* Aggressive migration if:
+* 1) task is cache cold, or
+* 2) too many balance attempts have failed.
+*/
+
+   tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
+   if (!tsk_cache_hot ||
+   env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+   if (tsk_cache_hot) {
+   schedstat_inc(env->sd, lb_hot_gained[env->idle]);
+   schedstat_inc(p, se.statistics.nr_forced_migrations);
+   }
+#endif
+   return 1;
+   }
+
+   return 1;
+}
+
+/*
+ * move_specific_task tries to move a specific task.
+ * Returns 1 if successful and 0 otherwise.
+ * Called with both runqueues locked.
+ */
+static int move_specific_task(struct lb_env *env, struct task_struct *pm)
+{
+   struct task_struct *p, *n;
+
+   list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
+   if (throttled_lb_pair(task_group(p), env->src_rq->cpu,
+   env->dst_cpu))
+   continue;
+
+   if (!hmp_can_migrate_task(p, env))
+   continue;
+   /* Check if we found the right task */
+   if (p != pm)
+   continue;
+
+   move_task(p, env);
+   /*
+* Right now, this is only the third place move_task()
+* is called, so we can safely collect move_task()
+* stats here rather than inside move_task().
+*/
+   schedstat_inc(env->sd, lb_gained[env->idle]);
+   return 1;
+   }
+   return 0;
+}
+
+/*
+ * hmp_active_task_migration_cpu_stop is run by cpu stopper and used to
+ * migrate a specific task from one runqueue to another.
+ * hmp_force_up_migration uses this to push a currently running task
+ * off a runqueue.
+ * Based on active_load_balance_stop_cpu and can potentially be merged.
+ */
+static int hmp_active_task_migration_cpu_stop(void *data)
+{
+   struct rq *busiest_rq = data;
+   struct task_struct *p = busiest_rq->migrate_task;
+   int busiest_cpu = cpu_of(busiest_rq);
+   int target_cpu = busiest_rq->push_cpu;
+   struct rq *target_rq = cpu_rq(target_cpu);
+   struct sched_domain *sd;
+
+   raw_spin_lock_irq(&busiest_rq->lock);
+   /* make sure the requested cpu hasn't gone down in the meantime */
+   if (unlikely(busiest_cpu != smp_processor_id() ||
+   !busiest_rq->active_balance)) {
+   goto out_unlock;
+   }
+   /* Is there any