The following commit has been merged into the sched/core branch of tip:

Commit-ID:     5a7987253ef0909d94e176cd97e511013de0fe19
Gitweb:        https://git.kernel.org/tip/5a7987253ef0909d94e176cd97e511013de0fe19
Author:        Peter Zijlstra <pet...@infradead.org>
AuthorDate:    Wed, 29 Apr 2020 17:29:58 +02:00
Committer:     Ingo Molnar <mi...@kernel.org>
CommitterDate: Wed, 17 Feb 2021 14:07:57 +01:00

rbtree, rtmutex: Use rb_add_cached()

Reduce rbtree boilerplate by using the new helpers.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Acked-by: Davidlohr Bueso <dbu...@suse.de>
---
 kernel/locking/rtmutex.c | 54 +++++++++++++--------------------------
 1 file changed, 18 insertions(+), 36 deletions(-)
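
For reference, rb_add_cached() is one of the new generic helpers, added
earlier in this series to include/linux/rbtree.h. Below is a minimal sketch
of its behaviour, reconstructed from the open-coded loops this patch removes;
the in-tree helper may differ in detail (e.g. in its return type):

        /* Illustrative sketch of rb_add_cached(); see include/linux/rbtree.h
         * for the real helper. */
        static inline void
        sketch_rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
                             bool (*less)(struct rb_node *, const struct rb_node *))
        {
                struct rb_node **link = &tree->rb_root.rb_node;
                struct rb_node *parent = NULL;
                bool leftmost = true;

                /* Binary-search descent to the insertion point. */
                while (*link) {
                        parent = *link;
                        if (less(node, parent)) {
                                link = &parent->rb_left;
                        } else {
                                /* Going right even once means @node is not
                                 * the new leftmost. */
                                link = &parent->rb_right;
                                leftmost = false;
                        }
                }

                rb_link_node(node, parent, link);
                /* Rebalance; update the cached leftmost pointer if needed. */
                rb_insert_color_cached(node, tree, leftmost);
        }

The rb_root_cached variant keeps a pointer to the leftmost (smallest) node so
that rb_first_cached() is O(1); that is why the descent tracks @leftmost.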

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2f8cd61..57e3804 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -267,27 +267,18 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
        return 1;
 }
 
+#define __node_2_waiter(node) \
+       rb_entry((node), struct rt_mutex_waiter, tree_entry)
+
+static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
+{
+       return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+}
+
 static void
 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 {
-       struct rb_node **link = &lock->waiters.rb_root.rb_node;
-       struct rb_node *parent = NULL;
-       struct rt_mutex_waiter *entry;
-       bool leftmost = true;
-
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
-               if (rt_mutex_waiter_less(waiter, entry)) {
-                       link = &parent->rb_left;
-               } else {
-                       link = &parent->rb_right;
-                       leftmost = false;
-               }
-       }
-
-       rb_link_node(&waiter->tree_entry, parent, link);
-       rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
+       rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
 }
 
 static void
@@ -300,27 +291,18 @@ rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
        RB_CLEAR_NODE(&waiter->tree_entry);
 }
 
+#define __node_2_pi_waiter(node) \
+       rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)
+
+static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
+{
+       return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
+}
+
 static void
 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
 {
-       struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
-       struct rb_node *parent = NULL;
-       struct rt_mutex_waiter *entry;
-       bool leftmost = true;
-
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
-               if (rt_mutex_waiter_less(waiter, entry)) {
-                       link = &parent->rb_left;
-               } else {
-                       link = &parent->rb_right;
-                       leftmost = false;
-               }
-       }
-
-       rb_link_node(&waiter->pi_tree_entry, parent, link);
-       rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
+       rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
 }
 
 static void

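For comparison, a self-contained toy example of the same pattern: a cached
rbtree keyed by an int, with the rb_entry() + less() boilerplate the patch
adds for rt_mutex_waiter. The names here are illustrative, not from the
patch:

        #include <linux/rbtree.h>

        struct demo_node {
                int key;
                struct rb_node node;
        };

        /* Map an embedded rb_node back to its containing demo_node. */
        #define __node_2_demo(n) rb_entry((n), struct demo_node, node)

        /* Comparator: true when @a sorts before @b. */
        static inline bool demo_less(struct rb_node *a, const struct rb_node *b)
        {
                return __node_2_demo(a)->key < __node_2_demo(b)->key;
        }

        static void demo_insert(struct rb_root_cached *tree, struct demo_node *dn)
        {
                rb_add_cached(&dn->node, tree, demo_less);
        }

rb_entry() is container_of(): it converts a pointer to the embedded rb_node
into a pointer to the containing structure. That is why a structure queued on
two trees, like rt_mutex_waiter with its tree_entry and pi_tree_entry nodes,
needs one __node_2_*() macro per tree.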