From: Peter Zijlstra <pet...@infradead.org>

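Debug instrumentation for the core scheduling machinery: trace_printk()
in the hot paths (priority comparison, per-sibling selection, max
tracking, final pick, cookie-mismatch check, cookie stealing) and plain
printk() for the rare events (core sched enable/disable and the
CPU -> core rq mapping at bring-up).

The trace_printk() output lands in the regular ftrace ring buffer; as a
rough sketch of how to look at it (standard tracefs usage, not part of
this patch):

  # cat /sys/kernel/debug/tracing/trace
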
Not-Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/sched/core.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5b8223c9a723..90655c9ad937 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -92,6 +92,10 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b)
 
        int pa = __task_prio(a), pb = __task_prio(b);
 
+       trace_printk("(%s/%d;%d,%Lu,%Lu) ?< (%s/%d;%d,%Lu,%Lu)\n",
+                    a->comm, a->pid, pa, a->se.vruntime, a->dl.deadline,
+                    b->comm, b->pid, pb, b->se.vruntime, b->dl.deadline);
+
        if (-pa < -pb)
                return true;
 
@@ -246,6 +250,8 @@ static void __sched_core_enable(void)
 
        static_branch_enable(&__sched_core_enabled);
        stop_machine(__sched_core_stopper, (void *)true, NULL);
+
+       printk("core sched enabled\n");
 }
 
 static void __sched_core_disable(void)
@@ -254,6 +260,8 @@ static void __sched_core_disable(void)
 
        stop_machine(__sched_core_stopper, (void *)false, NULL);
        static_branch_disable(&__sched_core_enabled);
+
+       printk("core sched disabled\n");
 }
 
 void sched_core_get(void)
@@ -3707,6 +3715,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                        put_prev_task(rq, prev);
                        set_next_task(rq, next);
                }
+
+               trace_printk("pick pre selected (%u %u %u): %s/%d %lx\n",
+                            rq->core->core_task_seq,
+                            rq->core->core_pick_seq,
+                            rq->core_sched_seq,
+                            next->comm, next->pid,
+                            next->core_cookie);
+
                return next;
        }
 
@@ -3786,6 +3802,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                         */
                        if (i == cpu && !need_sync && !p->core_cookie) {
                                next = p;
+                               trace_printk("unconstrained pick: %s/%d %lx\n",
+                                            next->comm, next->pid, next->core_cookie);
+
                                goto done;
                        }
 
@@ -3794,6 +3813,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
                        rq_i->core_pick = p;
 
+                       trace_printk("cpu(%d): selected: %s/%d %lx\n",
+                                    i, p->comm, p->pid, p->core_cookie);
+
                        /*
                         * If this new candidate is of higher priority than the
                         * previous; and they're incompatible; we need to wipe
@@ -3810,6 +3832,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                                rq->core->core_cookie = p->core_cookie;
                                max = p;
 
+                               trace_printk("max: %s/%d %lx\n", max->comm, 
max->pid, max->core_cookie);
+
                                if (old_max) {
                                        for_each_cpu(j, smt_mask) {
                                                if (j == i)
@@ -3837,6 +3861,7 @@ next_class:;
        rq->core->core_pick_seq = rq->core->core_task_seq;
        next = rq->core_pick;
        rq->core_sched_seq = rq->core->core_pick_seq;
+       trace_printk("picked: %s/%d %lx\n", next->comm, next->pid, 
next->core_cookie);
 
        /*
         * Reschedule siblings
@@ -3862,11 +3887,20 @@ next_class:;
                if (i == cpu)
                        continue;
 
-               if (rq_i->curr != rq_i->core_pick)
+               if (rq_i->curr != rq_i->core_pick) {
+                       trace_printk("IPI(%d)\n", i);
                        resched_curr(rq_i);
+               }
 
                /* Did we break L1TF mitigation requirements? */
-               WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
+               if (unlikely(!cookie_match(next, rq_i->core_pick))) {
+                       trace_printk("[%d]: cookie mismatch. 
%s/%d/0x%lx/0x%lx\n",
+                                    rq_i->cpu, rq_i->core_pick->comm,
+                                    rq_i->core_pick->pid,
+                                    rq_i->core_pick->core_cookie,
+                                    rq_i->core->core_cookie);
+                       WARN_ON_ONCE(1);
+               }
        }
 
 done:
@@ -3905,6 +3939,10 @@ static bool try_steal_cookie(int this, int that)
                if (p->core_occupation > dst->idle->core_occupation)
                        goto next;
 
+               trace_printk("core fill: %s/%d (%d->%d) %d %d %lx\n",
+                            p->comm, p->pid, that, this,
+                            p->core_occupation, dst->idle->core_occupation, cookie);
+
                p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src, p, 0);
                set_task_cpu(p, this);
@@ -6501,6 +6539,8 @@ int sched_cpu_starting(unsigned int cpu)
                WARN_ON_ONCE(rq->core && rq->core != core_rq);
                rq->core = core_rq;
        }
+
+       printk("core: %d -> %d\n", cpu, cpu_of(core_rq));
 #endif /* CONFIG_SCHED_CORE */
 
        sched_rq_cpu_starting(cpu);
-- 
2.17.1