On Thu, Mar 21, 2019 at 05:20:17PM -0400, Julien Desfossez wrote:
> On further investigation, we could see that the contention is mostly
> in the way rq locks are taken. With this patchset, we lock the whole
> core if cpu.tag is set for at least one cgroup. Because of this,
> __schedule() is more or less serialized for the core, and that
> accounts for the performance loss we are seeing. We also saw that
> newidle_balance() spends considerable time in load_balance() due to
> rq spinlock contention. Do you think it would help if the core-wide
> locking were only performed when absolutely needed?

Something like that could be done, but then you end up with two locks,
which is what I was hoping to avoid.

Basically you keep rq->lock as it exists today, but add something like
rq->core->core_lock; you then have to take that second lock (nested
under rq->lock) for every scheduling action involving a tagged task.
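
To illustrate the nesting (just a sketch; tagged_sched_action() is a
made-up stand-in for any scheduling action on a tagged task, using the
field names from the patch below):

	static void tagged_sched_action(struct rq *rq, struct task_struct *p)
	{
		raw_spin_lock(&rq->__lock);		/* per-rq lock, as today */

		if (p->core_cookie)			/* tagged task */
			raw_spin_lock(&rq->core->core_lock);	/* core-wide, nested */

		/* ... the actual scheduling action ... */

		if (p->core_cookie)
			raw_spin_unlock(&rq->core->core_lock);

		raw_spin_unlock(&rq->__lock);
	}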

It makes things complicated though, because now my head hurts thinking
about pick_next_task().

(this can obviously do away with the whole rq->lock wrappery)

Also, completely untested..

---
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -146,6 +146,8 @@ void sched_core_enqueue(struct rq *rq, s
        if (!p->core_cookie)
                return;
 
+       raw_spin_lock(&rq->core->core_lock);
+
        node = &rq->core_tree.rb_node;
        parent = *node;
 
@@ -161,6 +163,8 @@ void sched_core_enqueue(struct rq *rq, s
 
        rb_link_node(&p->core_node, parent, node);
        rb_insert_color(&p->core_node, &rq->core_tree);
+
+       raw_spin_unlock(&rq->core->core_lock);
 }
 
 void sched_core_dequeue(struct rq *rq, struct task_struct *p)
@@ -170,7 +174,9 @@ void sched_core_dequeue(struct rq *rq, s
        if (!p->core_cookie)
                return;
 
+       raw_spin_lock(&rq->core->core_lock);
        rb_erase(&p->core_node, &rq->core_tree);
+       raw_spin_unlock(&rq->core->core_lock);
 }
 
 /*
@@ -181,6 +187,8 @@ struct task_struct *sched_core_find(stru
        struct rb_node *node = rq->core_tree.rb_node;
        struct task_struct *node_task, *match;
 
+       lockdep_assert_held(&rq->core->core_lock);
+
        /*
         * The idle task always matches any cookie!
         */
@@ -206,6 +214,8 @@ struct task_struct *sched_core_next(stru
 {
        struct rb_node *node = &p->core_node;
 
+       lockdep_assert_held(&rq->core->core_lock);
+
        node = rb_next(node);
        if (!node)
                return NULL;
@@ -3685,6 +3695,8 @@ pick_next_task(struct rq *rq, struct tas
         * If there were no {en,de}queues since we picked (IOW, the task
         * pointers are all still valid), and we haven't scheduled the last
         * pick yet, do so now.
+        *
+        * XXX probably OK without ->core_lock
         */
        if (rq->core->core_pick_seq == rq->core->core_task_seq &&
            rq->core->core_pick_seq != rq->core_sched_seq) {
@@ -3710,6 +3722,20 @@ pick_next_task(struct rq *rq, struct tas
        if (!rq->nr_running)
                newidle_balance(rq, rf);
 
+       if (!rq->core->core_cookie) {
+               for_each_class(class) {
+                       next = pick_task(rq, class, NULL);
+                       if (next)
+                               break;
+               }
+
+               if (!next->core_cookie) {
+                       set_next_task(rq, next);
+                       return next;
+               }
+       }
+
+       raw_spin_lock(&rq->core->core_lock);
        cpu = cpu_of(rq);
        smt_mask = cpu_smt_mask(cpu);
 
@@ -3849,6 +3875,7 @@ next_class:;
        trace_printk("picked: %s/%d %lx\n", next->comm, next->pid, next->core_cookie);
 
 done:
+       raw_spin_unlock(&rq->core->core_lock);
        set_next_task(rq, next);
        return next;
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -966,6 +966,7 @@ struct rq {
        struct rb_root          core_tree;
 
        /* shared state */
+       raw_spinlock_t          core_lock;
        unsigned int            core_task_seq;
        unsigned int            core_pick_seq;
        unsigned long           core_cookie;
@@ -1007,9 +1008,6 @@ static inline bool sched_core_enabled(st
 
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
-       if (sched_core_enabled(rq))
-               return &rq->core->__lock;
-
        return &rq->__lock;
 }
 
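One more thing the above doesn't show: the new core_lock still needs a
raw_spin_lock_init() somewhere, presumably in sched_init()'s per-cpu
loop (sketch only, not part of the patch):

	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);

		raw_spin_lock_init(&rq->core_lock);
		/* ... existing per-rq init ... */
	}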
