Add entity variants of put_prev_task_fair() and set_curr_task_fair()
that will be later used by coscheduling.

Signed-off-by: Jan H. Schönherr <jscho...@amazon.de>
---
 kernel/sched/fair.c  | 34 +++++++++++++++++++++-------------
 kernel/sched/sched.h |  2 ++
 2 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f13fb4460b66..18b1d81951f1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6651,12 +6651,8 @@ done: __maybe_unused;
        return NULL;
 }
 
-/*
- * Account for a descheduled task:
- */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+void put_prev_entity_fair(struct rq *rq, struct sched_entity *se)
 {
-       struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;
 
        for_each_sched_entity(se) {
@@ -6666,6 +6662,14 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 }
 
 /*
+ * Account for a descheduled task:
+ */
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+{
+       put_prev_entity_fair(rq, &prev->se);
+}
+
+/*
  * sched_yield() is very simple
  *
  * The magic of dealing with the ->skip buddy is in pick_next_entity.
@@ -9758,15 +9762,8 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
        }
 }
 
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_curr_task_fair(struct rq *rq)
+void set_curr_entity_fair(struct rq *rq, struct sched_entity *se)
 {
-       struct sched_entity *se = &rq->curr->se;
-
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -9776,6 +9773,17 @@ static void set_curr_task_fair(struct rq *rq)
        }
 }
 
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_curr_task_fair(struct rq *rq)
+{
+       set_curr_entity_fair(rq, &rq->curr->se);
+}
+
 void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
        cfs_rq->tasks_timeline = RB_ROOT_CACHED;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 569a487ed07c..b36e61914a42 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1547,6 +1547,8 @@ bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
                         unsigned int task_delta);
 bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
                         unsigned int task_delta);
+void put_prev_entity_fair(struct rq *rq, struct sched_entity *se);
+void set_curr_entity_fair(struct rq *rq, struct sched_entity *se);
 
 struct sched_class {
        const struct sched_class *next;
-- 
2.9.3.1.gcba166c.dirty

Reply via email to