On Fri, 2007-10-19 at 14:42 -0400, Steven Rostedt wrote:
> plain text document attachment (add-rq-highest-prio.patch)
> This patch adds accounting to each runqueue to keep track of the
> highest prio task queued on the run queue. We only care about
> RT tasks, so if the run queue does not contain any active RT tasks
> its priority will be considered MAX_RT_PRIO.
> 
> This information will be used for later patches.
> 
> Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
> 
> ---
>  kernel/sched.c    |    7 +++++++
>  kernel/sched_rt.c |   25 +++++++++++++++++++++++++
>  2 files changed, 32 insertions(+)
> 
> Index: linux-test.git/kernel/sched.c
> ===================================================================
> --- linux-test.git.orig/kernel/sched.c        2007-10-19 12:33:09.000000000 -0400
> +++ linux-test.git/kernel/sched.c     2007-10-19 12:34:32.000000000 -0400
> @@ -324,6 +324,8 @@ struct rq {
>       int push_cpu;
>       /* cpu of this runqueue: */
>       int cpu;
> +     /* highest queued rt task prio */
> +     int highest_prio;
>  
>       struct task_struct *migration_thread;
>       struct list_head migration_queue;
> @@ -972,6 +974,8 @@ static void activate_task(struct rq *rq,
>  
>       enqueue_task(rq, p, wakeup);
>       inc_nr_running(p, rq);
> +
> +     rq_prio_add_task(rq, p);
>  }

We are better off doing this in enqueue_task(), or PI boosting will fail
to alter the prio of the RQ: rt_mutex_setprio() requeues a boosted task
via dequeue_task()/enqueue_task() directly, without ever going through
activate_task()/deactivate_task().

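Something along these lines (just a sketch; the existing enqueue_task()
body is elided and rq_prio_add_task() is the helper from this patch):

	static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
	{
		/* ... existing enqueue work ... */

		/*
		 * Account the RT prio on every enqueue, which also covers
		 * rt_mutex_setprio() boosting requeues.
		 */
		rq_prio_add_task(rq, p);
	}
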
>  
>  /*
> @@ -984,6 +988,8 @@ static void deactivate_task(struct rq *r
>  
>       dequeue_task(rq, p, sleep);
>       dec_nr_running(p, rq);
> +
> +     rq_prio_remove_task(rq, p);

Ditto for dequeue_task() (see the sketch below).

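Same idea on the dequeue side (again only a sketch), so a boosted task
leaving the queue also drops out of the accounting:

	static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
	{
		/* ... existing dequeue work ... */

		/* recalc rq->highest_prio after the rt bitmap is updated */
		rq_prio_remove_task(rq, p);
	}
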
>  }
>  
>  /**
> @@ -6619,6 +6625,7 @@ void __init sched_init(void)
>               rq->cpu = i;
>               rq->migration_thread = NULL;
>               INIT_LIST_HEAD(&rq->migration_queue);
> +             rq->highest_prio = MAX_RT_PRIO;
>  #endif
>               atomic_set(&rq->nr_iowait, 0);
>  
> Index: linux-test.git/kernel/sched_rt.c
> ===================================================================
> --- linux-test.git.orig/kernel/sched_rt.c     2007-10-19 12:33:09.000000000 -0400
> +++ linux-test.git/kernel/sched_rt.c  2007-10-19 12:33:23.000000000 -0400
> @@ -110,6 +110,31 @@ static struct task_struct *pick_next_tas
>       return next;
>  }
>  
> +#ifdef CONFIG_SMP
> +static inline void rq_prio_add_task(struct rq *rq, struct task_struct *p)
> +{
> +     if (unlikely(rt_task(p)) && p->prio < rq->highest_prio)
> +             rq->highest_prio = p->prio;
> +}
> +
> +static inline void rq_prio_remove_task(struct rq *rq, struct task_struct *p)
> +{
> +     struct rt_prio_array *array;
> +
> +     if (unlikely(rt_task(p))) {
> +             if (rq->rt_nr_running) {
> +                     if (p->prio >= rq->highest_prio) {

Should this be <= ?

We only need to recalc if the task being removed was the highest prio
(or higher, if that were possible, but it shouldn't be).

I think this logic will technically work as-is, because in a properly
accounted system every queued task's prio is >= rq->highest_prio, so you
will simply end up recalcing on every remove.  But I do like the
optimization you were trying to add.

So for the paranoid:

                                BUG_ON(p->prio < rq->highest_prio);
                                if (p->prio == rq->highest_prio) {
                                        /* recalc */
                                }

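Putting that together, the remove side could look roughly like this
(sketch only, built on the helper from your patch; the BUG_ON placement
is just for illustration):

	static inline void rq_prio_remove_task(struct rq *rq, struct task_struct *p)
	{
		struct rt_prio_array *array;

		if (unlikely(rt_task(p))) {
			if (rq->rt_nr_running) {
				/* a queued task can never beat the tracked prio */
				BUG_ON(p->prio < rq->highest_prio);
				if (p->prio == rq->highest_prio) {
					/* we removed the highest prio task: recalc */
					array = &rq->rt.active;
					rq->highest_prio =
						sched_find_first_bit(array->bitmap);
				}
				/* otherwise leave rq->highest_prio alone */
			} else
				rq->highest_prio = MAX_RT_PRIO;
		}
	}
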

> +                             /* recalculate */
> +                             array = &rq->rt.active;
> +                             rq->highest_prio =
> +                                     sched_find_first_bit(array->bitmap);
> +                     } /* otherwise leave rq->highest prio alone */
> +             } else
> +                     rq->highest_prio = MAX_RT_PRIO;
> +     }
> +}
> +#endif /* CONFIG_SMP */
> +
>  static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
>  {
>       update_curr_rt(rq);
> 
