From: Gregory Haskins <[EMAIL PROTECTED]>

Isolate the lowest-priority runqueue search logic into its own function so
that it can later be used in places other than find_lock_lowest_rq().
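
For reference, the resulting split looks roughly like this (an interface
sketch only; the actual code is in the diff below):

	/* Scan the runqueues @task may run on and return the cpu whose
	 * rq has the lowest priority, or -1 if none is found. */
	static int find_lowest_rq(struct task_struct *task);

	/* Retry up to RT_MAX_TRIES times: pick a cpu via find_lowest_rq(),
	 * lock its rq, and revalidate the task after double_lock_balance(). */
	static struct rq *find_lock_lowest_rq(struct task_struct *task,
					      struct rq *rq);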

Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>
Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
---

 kernel/sched_rt.c |   66 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 39 insertions(+), 27 deletions(-)

Index: linux-compile.git/kernel/sched_rt.c
===================================================================
--- linux-compile.git.orig/kernel/sched_rt.c    2007-11-20 19:53:04.000000000 -0500
+++ linux-compile.git/kernel/sched_rt.c 2007-11-20 19:53:05.000000000 -0500
@@ -260,54 +260,66 @@ static struct task_struct *pick_next_hig
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-                                     struct rq *this_rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-       struct rq *lowest_rq = NULL;
        int cpu;
-       int tries;
        cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
+       struct rq *lowest_rq = NULL;
 
        cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);
 
-       for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-               /*
-                * Scan each rq for the lowest prio.
-                */
-               for_each_cpu_mask(cpu, *cpu_mask) {
-                       struct rq *rq = &per_cpu(runqueues, cpu);
+       /*
+        * Scan each rq for the lowest prio.
+        */
+       for_each_cpu_mask(cpu, *cpu_mask) {
+               struct rq *rq = cpu_rq(cpu);
 
-                       if (cpu == this_rq->cpu)
-                               continue;
+               if (cpu == task_cpu(task))
+                       continue;
 
-                       /* We look for lowest RT prio or non-rt CPU */
-                       if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-                               lowest_rq = rq;
-                               break;
-                       }
+               /* We look for lowest RT prio or non-rt CPU */
+               if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+                       lowest_rq = rq;
+                       break;
+               }
 
-                       /* no locking for now */
-                       if (rq->rt.highest_prio > task->prio &&
-                           (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-                               lowest_rq = rq;
-                       }
+               /* no locking for now */
+               if (rq->rt.highest_prio > task->prio &&
+                   (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+                       lowest_rq = rq;
                }
+       }
+
+       return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+                                     struct rq *rq)
+{
+       struct rq *lowest_rq = NULL;
+       int cpu;
+       int tries;
 
-               if (!lowest_rq)
+       for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+               cpu = find_lowest_rq(task);
+
+               if (cpu == -1)
                        break;
 
+               lowest_rq = cpu_rq(cpu);
+
                /* if the prio of this runqueue changed, try again */
-               if (double_lock_balance(this_rq, lowest_rq)) {
+               if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the mean time, task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
-                       if (unlikely(task_rq(task) != this_rq ||
+                       if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
-                                    task_running(this_rq, task) ||
+                                    task_running(rq, task) ||
                                     !task->se.on_rq)) {
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
