Tested-by: Julien Desfossez <jdesfos...@digitalocean.com>
Not-Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
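Notes for anyone taking this for a spin: apart from the two printk()s in
__sched_core_{enable,disable}(), all the output here goes through
trace_printk() and therefore lands in the ftrace ring buffer, not in dmesg.
A minimal way to watch it live, assuming tracefs is mounted at the usual
location (adjust the path for your setup):

  # echo 1 > /sys/kernel/debug/tracing/tracing_on
  # cat /sys/kernel/debug/tracing/trace_pipe

trace_pipe blocks and consumes the buffer as it reads; cat "trace" instead
for a non-destructive snapshot.
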
 kernel/sched/core.c | 40 +++++++++++++++++++++++++++++++++++++++-
 kernel/sched/fair.c | 12 ++++++++++++
 2 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a733891dfe7d..2649efeac19f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -106,6 +106,10 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
 
        int pa = __task_prio(a), pb = __task_prio(b);
 
+       trace_printk("(%s/%d;%d,%llu,%llu) ?< (%s/%d;%d,%llu,%llu)\n",
+                    a->comm, a->pid, pa, a->se.vruntime, a->dl.deadline,
+                    b->comm, b->pid, pb, b->se.vruntime, b->dl.deadline);
+
        if (-pa < -pb)
                return true;
 
@@ -292,12 +296,16 @@ static void __sched_core_enable(void)
 
        static_branch_enable(&__sched_core_enabled);
        __sched_core_flip(true);
+
+       printk("core sched enabled\n");
 }
 
 static void __sched_core_disable(void)
 {
        __sched_core_flip(false);
        static_branch_disable(&__sched_core_enabled);
+
+       printk("core sched disabled\n");
 }
 
 void sched_core_get(void)
@@ -5361,6 +5369,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                        set_next_task(rq, next);
                }
 
+               trace_printk("pick pre selected (%u %u %u): %s/%d %llu\n",
+                            rq->core->core_task_seq,
+                            rq->core->core_pick_seq,
+                            rq->core_sched_seq,
+                            next->comm, next->pid,
+                            next->core_cookie.userspace_id);
+
                rq->core_pick = NULL;
                return next;
        }
@@ -5455,6 +5470,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                                        rq->core->core_forceidle_seq++;
                        }
 
+                       trace_printk("cpu(%d): selected: %s/%d %llu\n",
+                                    i, p->comm, p->pid,
+                                    p->core_cookie.userspace_id);
+
                        /*
                         * If this new candidate is of higher priority than the
                         * previous; and they're incompatible; we need to wipe
@@ -5471,6 +5490,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                                rq->core->core_cookie = p->core_cookie;
                                max = p;
 
+                               trace_printk("max: %s/%d %llu\n",
+                                            max->comm, max->pid,
+                                            max->core_cookie.userspace_id);
+
                                if (old_max) {
                                        rq->core->core_forceidle = false;
                                        for_each_cpu(j, smt_mask) {
@@ -5492,6 +5515,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        /* Something should have been selected for current CPU */
        WARN_ON_ONCE(!next);
+       trace_printk("picked: %s/%d %llu\n", next->comm, next->pid,
+                    next->core_cookie.userspace_id);
 
        /*
         * Reschedule siblings
@@ -5533,13 +5558,21 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                }
 
                /* Did we break L1TF mitigation requirements? */
-               WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
+               if (unlikely(!cookie_match(next, rq_i->core_pick))) {
+                       trace_printk("[%d]: cookie mismatch. 
%s/%d/0x%llu/0x%llu\n",
+                                    rq_i->cpu, rq_i->core_pick->comm,
+                                    rq_i->core_pick->pid,
+                                    rq_i->core_pick->core_cookie.userspace_id,
+                                    rq_i->core->core_cookie.userspace_id);
+                       WARN_ON_ONCE(1);
+               }
 
                if (rq_i->curr == rq_i->core_pick) {
                        rq_i->core_pick = NULL;
                        continue;
                }
 
+               trace_printk("IPI(%d)\n", i);
                resched_curr(rq_i);
        }
 
@@ -5579,6 +5612,11 @@ static bool try_steal_cookie(int this, int that)
                if (p->core_occupation > dst->idle->core_occupation)
                        goto next;
 
+               trace_printk("core fill: %s/%d (%d->%d) %d %d %llu\n",
+                            p->comm, p->pid, that, this,
+                            p->core_occupation, dst->idle->core_occupation,
+                            cookie->userspace_id);
+
                p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src, p, 0);
                set_task_cpu(p, this);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 12030b73a032..2432420b4bef 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10783,6 +10783,9 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
  */
 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
 {
+       bool root = true;
+       long old, new;
+
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -10792,6 +10795,15 @@ static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forc
                        cfs_rq->forceidle_seq = fi_seq;
                }
 
+
+               if (root) {
+                       old = cfs_rq->min_vruntime_fi;
+                       new = cfs_rq->min_vruntime;
+                       root = false;
+                       trace_printk("cfs_rq(min_vruntime_fi) %lu->%lu\n",
+                                    old, new);
+               }
+
                cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
        }
 }
-- 
2.31.0.291.g576ba9dcdaf-goog
