Hi Peter,

> On Jul 1, 2020, at 5:32 AM, Vineeth Remanan Pillai <vpil...@digitalocean.com> wrote:
> 
> From: Peter Zijlstra <pet...@infradead.org>
> 
> Introduce task_struct::core_cookie as an opaque identifier for core
> scheduling. When enabled, core scheduling will only allow matching
> tasks to be on the core, where idle matches everything.
> 
> When task_struct::core_cookie is set (and core scheduling is enabled)
> these tasks are indexed in a second RB-tree, first on cookie value
> then on scheduling function, such that matching task selection always
> finds the most eligible match.
> 
> NOTE: *shudder* at the overhead...
> 
> NOTE: *sigh*, a 3rd copy of the scheduling function; the alternative
> is per class tracking of cookies and that just duplicates a lot of
> stuff for no raisin (the 2nd copy lives in the rt-mutex PI code).
> 
> Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
> Signed-off-by: Vineeth Remanan Pillai <vpil...@digitalocean.com>
> Signed-off-by: Julien Desfossez <jdesfos...@digitalocean.com>
> ---
> include/linux/sched.h |   8 ++-
> kernel/sched/core.c   | 146 ++++++++++++++++++++++++++++++++++++++++++
> kernel/sched/fair.c   |  46 -------------
> kernel/sched/sched.h  |  55 ++++++++++++++++
> 4 files changed, 208 insertions(+), 47 deletions(-)
> 
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 4418f5cb8324..3c8dcc5ff039 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -683,10 +683,16 @@ struct task_struct {
>       const struct sched_class        *sched_class;
>       struct sched_entity             se;
>       struct sched_rt_entity          rt;
> +     struct sched_dl_entity          dl;
> +
> +#ifdef CONFIG_SCHED_CORE
> +     struct rb_node                  core_node;
> +     unsigned long                   core_cookie;
> +#endif
> +
> #ifdef CONFIG_CGROUP_SCHED
>       struct task_group               *sched_task_group;
> #endif
> -     struct sched_dl_entity          dl;
> 
> #ifdef CONFIG_UCLAMP_TASK
>       /* Clamp values requested for a scheduling entity */
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 4b81301e3f21..b21bcab20da6 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -77,6 +77,141 @@ int sysctl_sched_rt_runtime = 950000;
> 
> DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
> 
> +/* kernel prio, less is more */
> +static inline int __task_prio(struct task_struct *p)
> +{
> +     if (p->sched_class == &stop_sched_class) /* trumps deadline */
> +             return -2;
> +
> +     if (rt_prio(p->prio)) /* includes deadline */
> +             return p->prio; /* [-1, 99] */
> +
> +     if (p->sched_class == &idle_sched_class)
> +             return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

Would "return MAX_PRIO;" be simpler here? The two expressions are
equivalent, and MAX_PRIO spells out the intent.
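
For reference, the definitions in include/linux/sched/prio.h expand to the
same 140:

    #define MAX_NICE            19
    #define MIN_NICE            -20
    #define NICE_WIDTH          (MAX_NICE - MIN_NICE + 1)       /* 40 */

    #define MAX_USER_RT_PRIO    100
    #define MAX_RT_PRIO         MAX_USER_RT_PRIO

    #define MAX_PRIO            (MAX_RT_PRIO + NICE_WIDTH)      /* 140 */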

> +
> +     return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */

MAX_RT_PRIO (100) + MAX_NICE (19) is 119, so the comment should say 119
rather than 120?  :)

Thx.
Regards,
Jiang

> +}
> +
> +/*
> + * l(a,b)
> + * le(a,b) := !l(b,a)
> + * g(a,b)  := l(b,a)
> + * ge(a,b) := !l(a,b)
> + */
> +
> +/* real prio, less is less */
> +static inline bool prio_less(struct task_struct *a, struct task_struct *b)
> +{
> +
> +     int pa = __task_prio(a), pb = __task_prio(b);
> +
> +     if (-pa < -pb)
> +             return true;
> +
> +     if (-pb < -pa)
> +             return false;
> +
> +     if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
> +             return !dl_time_before(a->dl.deadline, b->dl.deadline);
> +
> +     if (pa == MAX_RT_PRIO + MAX_NICE)  { /* fair */
> +             u64 vruntime = b->se.vruntime;
> +
> +             /*
> +              * Normalize the vruntime if tasks are in different cpus.
> +              */
> +             if (task_cpu(a) != task_cpu(b)) {
> +                     vruntime -= task_cfs_rq(b)->min_vruntime;
> +                     vruntime += task_cfs_rq(a)->min_vruntime;
> +             }
> +
> +             return !((s64)(a->se.vruntime - vruntime) <= 0);
> +     }
> +
> +     return false;
> +}
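
For anyone following the thread, a toy walk-through of the normalization
above (numbers invented):

    /*
     * a on CPU0: a->se.vruntime = 1500, min_vruntime = 1000
     * b on CPU1: b->se.vruntime = 5200, min_vruntime = 5000
     *
     * b mapped into a's frame: 5200 - 5000 + 1000 = 1200.
     * a's 1500 > 1200, so a has received more service and
     * prio_less(a, b) returns true: a is the lower-priority pick.
     * The (s64) cast keeps the comparison safe across u64 wraparound.
     */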
> +
> +static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
> +{
> +     if (a->core_cookie < b->core_cookie)
> +             return true;
> +
> +     if (a->core_cookie > b->core_cookie)
> +             return false;
> +
> +     /* flip prio, so high prio is leftmost */
> +     if (prio_less(b, a))
> +             return true;
> +
> +     return false;
> +}
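
So the tree is keyed by cookie first, then by priority within a cookie; a
made-up example of the resulting left-to-right order:

    /*
     * T1: cookie=1, CFS task (squashed fair prio)
     * T2: cookie=1, RT task, prio 50
     * T3: cookie=2, CFS task
     *
     * rb-tree order: T2, T1, T3
     *  - ascending cookie (1 before 2),
     *  - within cookie 1, higher prio leftmost (RT beats CFS).
     */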
> +
> +static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
> +{
> +     struct rb_node *parent, **node;
> +     struct task_struct *node_task;
> +
> +     rq->core->core_task_seq++;
> +
> +     if (!p->core_cookie)
> +             return;
> +
> +     node = &rq->core_tree.rb_node;
> +     parent = *node;
> +
> +     while (*node) {
> +             node_task = container_of(*node, struct task_struct, core_node);
> +             parent = *node;
> +
> +             if (__sched_core_less(p, node_task))
> +                     node = &parent->rb_left;
> +             else
> +                     node = &parent->rb_right;
> +     }
> +
> +     rb_link_node(&p->core_node, parent, node);
> +     rb_insert_color(&p->core_node, &rq->core_tree);
> +}
> +
> +static void sched_core_dequeue(struct rq *rq, struct task_struct *p)
> +{
> +     rq->core->core_task_seq++;
> +
> +     if (!p->core_cookie)
> +             return;
> +
> +     rb_erase(&p->core_node, &rq->core_tree);
> +}
> +
> +/*
> + * Find left-most (aka, highest priority) task matching @cookie.
> + */
> +static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
> +{
> +     struct rb_node *node = rq->core_tree.rb_node;
> +     struct task_struct *node_task, *match;
> +
> +     /*
> +      * The idle task always matches any cookie!
> +      */
> +     match = idle_sched_class.pick_task(rq);
> +
> +     while (node) {
> +             node_task = container_of(node, struct task_struct, core_node);
> +
> +             if (cookie < node_task->core_cookie) {
> +                     node = node->rb_left;
> +             } else if (cookie > node_task->core_cookie) {
> +                     node = node->rb_right;
> +             } else {
> +                     match = node_task;
> +                     node = node->rb_left;
> +             }
> +     }
> +
> +     return match;
> +}
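
In case it helps other readers: my rough sketch of how the core-wide pick
in the later patches of this series consumes this (paraphrased, not the
exact code):

    /* for each sibling runqueue rq_i on the core: */
    struct task_struct *p = sched_core_find(rq_i, cookie);
    /* p is the highest-priority task on rq_i matching @cookie,
     * or the idle task when nothing matches. */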
> +
> /*
>  * The static-key + stop-machine variable are needed such that:
>  *
> @@ -135,6 +270,11 @@ void sched_core_put(void)
>       mutex_unlock(&sched_core_mutex);
> }
> 
> +#else /* !CONFIG_SCHED_CORE */
> +
> +static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
> +static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
> +
> #endif /* CONFIG_SCHED_CORE */
> 
> /*
> @@ -1347,6 +1487,9 @@ static inline void init_uclamp(void) { }
> 
> static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
> {
> +     if (sched_core_enabled(rq))
> +             sched_core_enqueue(rq, p);
> +
>       if (!(flags & ENQUEUE_NOCLOCK))
>               update_rq_clock(rq);
> 
> @@ -1361,6 +1504,9 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
> 
> static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
> {
> +     if (sched_core_enabled(rq))
> +             sched_core_dequeue(rq, p);
> +
>       if (!(flags & DEQUEUE_NOCLOCK))
>               update_rq_clock(rq);
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index e44a43b87975..ae17507533a0 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -260,33 +260,11 @@ const struct sched_class fair_sched_class;
>  */
> 
> #ifdef CONFIG_FAIR_GROUP_SCHED
> -static inline struct task_struct *task_of(struct sched_entity *se)
> -{
> -     SCHED_WARN_ON(!entity_is_task(se));
> -     return container_of(se, struct task_struct, se);
> -}
> 
> /* Walk up scheduling entities hierarchy */
> #define for_each_sched_entity(se) \
>               for (; se; se = se->parent)
> 
> -static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
> -{
> -     return p->se.cfs_rq;
> -}
> -
> -/* runqueue on which this entity is (to be) queued */
> -static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
> -{
> -     return se->cfs_rq;
> -}
> -
> -/* runqueue "owned" by this group */
> -static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
> -{
> -     return grp->my_q;
> -}
> -
> static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
> {
>       if (!path)
> @@ -447,33 +425,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
> 
> #else /* !CONFIG_FAIR_GROUP_SCHED */
> 
> -static inline struct task_struct *task_of(struct sched_entity *se)
> -{
> -     return container_of(se, struct task_struct, se);
> -}
> -
> #define for_each_sched_entity(se) \
>               for (; se; se = NULL)
> 
> -static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
> -{
> -     return &task_rq(p)->cfs;
> -}
> -
> -static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
> -{
> -     struct task_struct *p = task_of(se);
> -     struct rq *rq = task_rq(p);
> -
> -     return &rq->cfs;
> -}
> -
> -/* runqueue "owned" by this group */
> -static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
> -{
> -     return NULL;
> -}
> -
> static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
> {
>       if (path)
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 66e586adee18..c85c5a4bc21f 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1033,6 +1033,10 @@ struct rq {
>       /* per rq */
>       struct rq               *core;
>       unsigned int            core_enabled;
> +     struct rb_root          core_tree;
> +
> +     /* shared state */
> +     unsigned int            core_task_seq;
> #endif
> };
> 
> @@ -1112,6 +1116,57 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
> #define cpu_curr(cpu)         (cpu_rq(cpu)->curr)
> #define raw_rq()              raw_cpu_ptr(&runqueues)
> 
> +#ifdef CONFIG_FAIR_GROUP_SCHED
> +static inline struct task_struct *task_of(struct sched_entity *se)
> +{
> +     SCHED_WARN_ON(!entity_is_task(se));
> +     return container_of(se, struct task_struct, se);
> +}
> +
> +static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
> +{
> +     return p->se.cfs_rq;
> +}
> +
> +/* runqueue on which this entity is (to be) queued */
> +static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
> +{
> +     return se->cfs_rq;
> +}
> +
> +/* runqueue "owned" by this group */
> +static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
> +{
> +     return grp->my_q;
> +}
> +
> +#else
> +
> +static inline struct task_struct *task_of(struct sched_entity *se)
> +{
> +     return container_of(se, struct task_struct, se);
> +}
> +
> +static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
> +{
> +     return &task_rq(p)->cfs;
> +}
> +
> +static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
> +{
> +     struct task_struct *p = task_of(se);
> +     struct rq *rq = task_rq(p);
> +
> +     return &rq->cfs;
> +}
> +
> +/* runqueue "owned" by this group */
> +static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
> +{
> +     return NULL;
> +}
> +#endif
> +
> extern void update_rq_clock(struct rq *rq);
> 
> static inline u64 __rq_clock_broken(struct rq *rq)
> -- 
> 2.17.1
> 
> 
