Reduce rbtree boilerplate by using the new helper function.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/fair.c |   46 ++++++++++++++--------------------------------
 1 file changed, 14 insertions(+), 32 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -533,12 +533,15 @@ static inline u64 min_vruntime(u64 min_v
        return min_vruntime;
 }
 
-static inline int entity_before(struct sched_entity *a,
+static inline bool entity_before(struct sched_entity *a,
                                struct sched_entity *b)
 {
        return (s64)(a->vruntime - b->vruntime) < 0;
 }
 
+#define __node_2_se(node) \
+       rb_entry((node), struct sched_entity, run_node)
+
 static void update_min_vruntime(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *curr = cfs_rq->curr;
@@ -554,8 +557,7 @@ static void update_min_vruntime(struct c
        }
 
        if (leftmost) { /* non-empty tree */
-               struct sched_entity *se;
-               se = rb_entry(leftmost, struct sched_entity, run_node);
+               struct sched_entity *se = __node_2_se(leftmost);
 
                if (!curr)
                        vruntime = se->vruntime;
@@ -571,37 +573,17 @@ static void update_min_vruntime(struct c
 #endif
 }
 
+static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+{
+       return entity_before(__node_2_se(a), __node_2_se(b));
+}
+
 /*
  * Enqueue an entity into the rb-tree:
  */
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
-       struct rb_node *parent = NULL;
-       struct sched_entity *entry;
-       bool leftmost = true;
-
-       /*
-        * Find the right place in the rbtree:
-        */
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct sched_entity, run_node);
-               /*
-                * We dont care about collisions. Nodes with
-                * the same key stay together.
-                */
-               if (entity_before(se, entry)) {
-                       link = &parent->rb_left;
-               } else {
-                       link = &parent->rb_right;
-                       leftmost = false;
-               }
-       }
-
-       rb_link_node(&se->run_node, parent, link);
-       rb_insert_color_cached(&se->run_node,
-                              &cfs_rq->tasks_timeline, leftmost);
+       rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
 }
 
 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -616,7 +598,7 @@ struct sched_entity *__pick_first_entity
        if (!left)
                return NULL;
 
-       return rb_entry(left, struct sched_entity, run_node);
+       return __node_2_se(left);
 }
 
 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
@@ -626,7 +608,7 @@ static struct sched_entity *__pick_next_
        if (!next)
                return NULL;
 
-       return rb_entry(next, struct sched_entity, run_node);
+       return __node_2_se(next);
 }
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -637,7 +619,7 @@ struct sched_entity *__pick_last_entity(
        if (!last)
                return NULL;
 
-       return rb_entry(last, struct sched_entity, run_node);
+       return __node_2_se(last);
 }
 
 /**************************************************************


Reply via email to