Re: [PATCH] sched: implement staircase deadline scheduler further improvements-1

2007-04-18 Thread Con Kolivas
On Thursday 19 April 2007 09:48, Con Kolivas wrote:
> While the Staircase Deadline scheduler has not been completely killed off
> and is still in -mm I would like to fix some outstanding issues that I've
> found since it still serves for comparison with all the upcoming
> schedulers.
>
> While still in -mm can we queue this on top please?
>
> A set of staircase-deadline v 0.41 patches will make their way into the
> usual place for those willing to test it.
>
> http://ck.kolivas.org/patches/staircase-deadline/

Oops! Minor thinko! Here is a respin. Please apply this one instead.

I better make a 0.42 heh.

---
The prio_level was being inappropriately decreased if a higher priority
task was still using previous timeslice. Fix that.

Task expiration of higher priority tasks was not being taken into account
with allocating priority slots. Check the expired best_static_prio level
to facilitate that.

Explicitly check all better static priority prio_levels when deciding on
allocating slots for niced tasks.

These changes improve behaviour in many ways.

Signed-off-by: Con Kolivas <[EMAIL PROTECTED]>

---
 kernel/sched.c |   64 ++---
 1 file changed, 43 insertions(+), 21 deletions(-)

Index: linux-2.6.21-rc7-sd/kernel/sched.c
===
--- linux-2.6.21-rc7-sd.orig/kernel/sched.c 2007-04-19 08:51:54.0 
+1000
+++ linux-2.6.21-rc7-sd/kernel/sched.c  2007-04-19 12:03:29.0 +1000
@@ -145,6 +145,12 @@ struct prio_array {
 */
DECLARE_BITMAP(prio_bitmap, MAX_PRIO + 1);
 
+   /*
+* The best static priority (of the dynamic priority tasks) queued
+* this array.
+*/
+   int best_static_prio;
+
 #ifdef CONFIG_SMP
/* For convenience looks back at rq */
struct rq *rq;
@@ -191,9 +197,9 @@ struct rq {
 
/*
 * The current dynamic priority level this runqueue is at per static
-* priority level, and the best static priority queued this rotation.
+* priority level.
 */
-   int prio_level[PRIO_RANGE], best_static_prio;
+   int prio_level[PRIO_RANGE];
 
/* How many times we have rotated the priority queue */
unsigned long prio_rotation;
@@ -669,7 +675,7 @@ static void task_new_array(struct task_s
 }
 
 /* Find the first slot from the relevant prio_matrix entry */
-static inline int first_prio_slot(struct task_struct *p)
+static int first_prio_slot(struct task_struct *p)
 {
if (unlikely(p->policy == SCHED_BATCH))
return p->static_prio;
@@ -682,11 +688,18 @@ static inline int first_prio_slot(struct
  * level. SCHED_BATCH tasks do not use the priority matrix. They only take
  * priority slots from their static_prio and above.
  */
-static inline int next_entitled_slot(struct task_struct *p, struct rq *rq)
+static int next_entitled_slot(struct task_struct *p, struct rq *rq)
 {
+   int search_prio = MAX_RT_PRIO, uprio = USER_PRIO(p->static_prio);
+   struct prio_array *array = rq->active;
DECLARE_BITMAP(tmp, PRIO_RANGE);
-   int search_prio, uprio = USER_PRIO(p->static_prio);
 
+   /*
+* Go straight to expiration if there are higher priority tasks
+* already expired.
+*/
+   if (p->static_prio > rq->expired->best_static_prio)
+   return MAX_PRIO;
if (!rq->prio_level[uprio])
rq->prio_level[uprio] = MAX_RT_PRIO;
/*
@@ -694,15 +707,22 @@ static inline int next_entitled_slot(str
 * static_prio are acceptable, and only if it's not better than
 * a queued better static_prio's prio_level.
 */
-   if (p->static_prio < rq->best_static_prio) {
-   search_prio = MAX_RT_PRIO;
+   if (p->static_prio < array->best_static_prio) {
if (likely(p->policy != SCHED_BATCH))
-   rq->best_static_prio = p->static_prio;
-   } else if (p->static_prio == rq->best_static_prio)
+   array->best_static_prio = p->static_prio;
+   } else if (p->static_prio == array->best_static_prio) {
search_prio = rq->prio_level[uprio];
-   else {
-   search_prio = max(rq->prio_level[uprio],
-   rq->prio_level[USER_PRIO(rq->best_static_prio)]);
+   } else {
+   int i;
+
+   search_prio = rq->prio_level[uprio];
+   /* A bound O(n) function, worst case n is 40 */
+   for (i = array->best_static_prio; i <= p->static_prio ; i++) {
+   if (!rq->prio_level[USER_PRIO(i)])
+   rq->prio_level[USER_PRIO(i)] = MAX_RT_PRIO;
+   search_prio = max(search_prio,
+ rq->prio_level[USER_PRIO(i)]);
+   }
}
if (unlikely(p->policy == SCHED_BATCH)) {
	search_prio = max(search_prio, p->static_prio);

Re: [PATCH] sched: implement staircase deadline scheduler further improvements-1

2007-04-18 Thread Con Kolivas
On Thursday 19 April 2007 09:48, Con Kolivas wrote:
> While the Staircase Deadline scheduler has not been completely killed off
> and is still in -mm I would like to fix some outstanding issues that I've
> found since it still serves for comparison with all the upcoming
> schedulers.
>
> While still in -mm can we queue this on top please?
>
> A set of staircase-deadline v 0.41 patches will make their way into the
> usual place for those willing to test it.
>
> http://ck.kolivas.org/patches/staircase-deadline/

Oops! Minor thinko! Here is a respin. Please apply this one instead.

I better make a 0.42 heh.

---
The prio_level was being inappropriately decreased if a higher priority
task was still using previous timeslice. Fix that.

Task expiration of higher priority tasks was not being taken into account
with allocating priority slots. Check the expired best_static_prio level
to facilitate that.

Explicitly check all better static priority prio_levels when deciding on
allocating slots for niced tasks.

These changes improve behaviour in many ways.

Signed-off-by: Con Kolivas <[EMAIL PROTECTED]>

---
 kernel/sched.c |   64 ++---
 1 file changed, 43 insertions(+), 21 deletions(-)

Index: linux-2.6.21-rc7-sd/kernel/sched.c
===
--- linux-2.6.21-rc7-sd.orig/kernel/sched.c 2007-04-19 08:51:54.0 
+1000
+++ linux-2.6.21-rc7-sd/kernel/sched.c  2007-04-19 12:03:29.0 +1000
@@ -145,6 +145,12 @@ struct prio_array {
 */
DECLARE_BITMAP(prio_bitmap, MAX_PRIO + 1);
 
+   /*
+* The best static priority (of the dynamic priority tasks) queued
+* this array.
+*/
+   int best_static_prio;
+
 #ifdef CONFIG_SMP
/* For convenience looks back at rq */
struct rq *rq;
@@ -191,9 +197,9 @@ struct rq {
 
/*
 * The current dynamic priority level this runqueue is at per static
-* priority level, and the best static priority queued this rotation.
+* priority level.
 */
-   int prio_level[PRIO_RANGE], best_static_prio;
+   int prio_level[PRIO_RANGE];
 
/* How many times we have rotated the priority queue */
unsigned long prio_rotation;
@@ -669,7 +675,7 @@ static void task_new_array(struct task_s
 }
 
 /* Find the first slot from the relevant prio_matrix entry */
-static inline int first_prio_slot(struct task_struct *p)
+static int first_prio_slot(struct task_struct *p)
 {
	if (unlikely(p->policy == SCHED_BATCH))
		return p->static_prio;
@@ -682,11 +688,18 @@ static inline int first_prio_slot(struct
  * level. SCHED_BATCH tasks do not use the priority matrix. They only take
  * priority slots from their static_prio and above.
  */
-static inline int next_entitled_slot(struct task_struct *p, struct rq *rq)
+static int next_entitled_slot(struct task_struct *p, struct rq *rq)
 {
+   int search_prio = MAX_RT_PRIO, uprio = USER_PRIO(p->static_prio);
+   struct prio_array *array = rq->active;
	DECLARE_BITMAP(tmp, PRIO_RANGE);
-   int search_prio, uprio = USER_PRIO(p->static_prio);
 
+   /*
+* Go straight to expiration if there are higher priority tasks
+* already expired.
+*/
+   if (p->static_prio > rq->expired->best_static_prio)
+   return MAX_PRIO;
	if (!rq->prio_level[uprio])
	rq->prio_level[uprio] = MAX_RT_PRIO;
/*
@@ -694,15 +707,22 @@ static inline int next_entitled_slot(str
 * static_prio are acceptable, and only if it's not better than
 * a queued better static_prio's prio_level.
 */
-   if (p->static_prio < rq->best_static_prio) {
-   search_prio = MAX_RT_PRIO;
+   if (p->static_prio < array->best_static_prio) {
	if (likely(p->policy != SCHED_BATCH))
-   rq->best_static_prio = p->static_prio;
-   } else if (p->static_prio == rq->best_static_prio)
+   array->best_static_prio = p->static_prio;
+   } else if (p->static_prio == array->best_static_prio) {
	search_prio = rq->prio_level[uprio];
-   else {
-   search_prio = max(rq->prio_level[uprio],
-   rq->prio_level[USER_PRIO(rq->best_static_prio)]);
+   } else {
+   int i;
+
+   search_prio = rq->prio_level[uprio];
+   /* A bound O(n) function, worst case n is 40 */
+   for (i = array->best_static_prio; i <= p->static_prio ; i++) {
+   if (!rq->prio_level[USER_PRIO(i)])
+   rq->prio_level[USER_PRIO(i)] = MAX_RT_PRIO;
+   search_prio = max(search_prio,
+ rq->prio_level[USER_PRIO(i)]);
+   }
}
	if (unlikely(p->policy == SCHED_BATCH)) {
		search_prio = max(search_prio, p->static_prio);
@@ -718,6 +738,8 @@ static void