... to sched/deadline.c. This helps make sched/core.c smaller and
hopefully easier to understand and maintain.

Signed-off-by: Nicolas Pitre <n...@linaro.org>
---
 kernel/sched/core.c     | 335 +----------------------------------------------
 kernel/sched/deadline.c | 336 ++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h    |  14 ++
 3 files changed, 356 insertions(+), 329 deletions(-)
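
A quick aid for reviewers (not part of the patch; a minimal sketch
assuming the usual kernel time constants and to_ratio() as defined in
core.c): deadline bandwidth is a 20-bit fixed-point fraction of one
CPU, e.g.

	u64 runtime = 10 * NSEC_PER_MSEC;	/* 10ms of runtime...	*/
	u64 period  = 100 * NSEC_PER_MSEC;	/* ...every 100ms	*/
	u64 bw = to_ratio(period, runtime);	/* (runtime << 20) / period
						 * == 104857, i.e. ~0.1	*/

and sched_dl_overflow() below admits a task only while the sum of all
admitted bandwidths stays within cpus * dl_b->bw of the root domain.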

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 94fa712791..93ce28ea34 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2148,23 +2148,6 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 }
 
 /*
- * This function clears the sched_dl_entity static params.
- */
-void __dl_clear_params(struct task_struct *p)
-{
-       struct sched_dl_entity *dl_se = &p->dl;
-
-       dl_se->dl_runtime = 0;
-       dl_se->dl_deadline = 0;
-       dl_se->dl_period = 0;
-       dl_se->flags = 0;
-       dl_se->dl_bw = 0;
-
-       dl_se->dl_throttled = 0;
-       dl_se->dl_yielded = 0;
-}
-
-/*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  *
@@ -2443,90 +2426,6 @@ unsigned long to_ratio(u64 period, u64 runtime)
        return div64_u64(runtime << 20, period);
 }
 
-#ifdef CONFIG_SMP
-inline struct dl_bw *dl_bw_of(int i)
-{
-       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
-                        "sched RCU must be held");
-       return &cpu_rq(i)->rd->dl_bw;
-}
-
-static inline int dl_bw_cpus(int i)
-{
-       struct root_domain *rd = cpu_rq(i)->rd;
-       int cpus = 0;
-
-       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
-                        "sched RCU must be held");
-       for_each_cpu_and(i, rd->span, cpu_active_mask)
-               cpus++;
-
-       return cpus;
-}
-#else
-inline struct dl_bw *dl_bw_of(int i)
-{
-       return &cpu_rq(i)->dl.dl_bw;
-}
-
-static inline int dl_bw_cpus(int i)
-{
-       return 1;
-}
-#endif
-
-/*
- * We must be sure that accepting a new task (or allowing changing the
- * parameters of an existing one) is consistent with the bandwidth
- * constraints. If yes, this function also accordingly updates the currently
- * allocated bandwidth to reflect the new situation.
- *
- * This function is called while holding p's rq->lock.
- *
- * XXX we should delay bw change until the task's 0-lag point, see
- * __setparam_dl().
- */
-static int dl_overflow(struct task_struct *p, int policy,
-                      const struct sched_attr *attr)
-{
-
-       struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-       u64 period = attr->sched_period ?: attr->sched_deadline;
-       u64 runtime = attr->sched_runtime;
-       u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
-       int cpus, err = -1;
-
-       /* !deadline task may carry old deadline bandwidth */
-       if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
-               return 0;
-
-       /*
-        * Either if a task, enters, leave, or stays -deadline but changes
-        * its parameters, we may need to update accordingly the total
-        * allocated bandwidth of the container.
-        */
-       raw_spin_lock(&dl_b->lock);
-       cpus = dl_bw_cpus(task_cpu(p));
-       if (dl_policy(policy) && !task_has_dl_policy(p) &&
-           !__dl_overflow(dl_b, cpus, 0, new_bw)) {
-               __dl_add(dl_b, new_bw);
-               err = 0;
-       } else if (dl_policy(policy) && task_has_dl_policy(p) &&
-                  !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
-               __dl_clear(dl_b, p->dl.dl_bw);
-               __dl_add(dl_b, new_bw);
-               err = 0;
-       } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
-               __dl_clear(dl_b, p->dl.dl_bw);
-               err = 0;
-       }
-       raw_spin_unlock(&dl_b->lock);
-
-       return err;
-}
-
-extern void init_dl_bw(struct dl_bw *dl_b);
-
 /*
  * wake_up_new_task - wake up a newly created task for the first time.
  *
@@ -4009,46 +3908,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 }
 
 /*
- * This function initializes the sched_dl_entity of a newly becoming
- * SCHED_DEADLINE task.
- *
- * Only the static values are considered here, the actual runtime and the
- * absolute deadline will be properly calculated when the task is enqueued
- * for the first time with its new policy.
- */
-static void
-__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
-{
-       struct sched_dl_entity *dl_se = &p->dl;
-
-       dl_se->dl_runtime = attr->sched_runtime;
-       dl_se->dl_deadline = attr->sched_deadline;
-       dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
-       dl_se->flags = attr->sched_flags;
-       dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-
-       /*
-        * Changing the parameters of a task is 'tricky' and we're not doing
-        * the correct thing -- also see task_dead_dl() and switched_from_dl().
-        *
-        * What we SHOULD do is delay the bandwidth release until the 0-lag
-        * point. This would include retaining the task_struct until that time
-        * and change dl_overflow() to not immediately decrement the current
-        * amount.
-        *
-        * Instead we retain the current runtime/deadline and let the new
-        * parameters take effect after the current reservation period lapses.
-        * This is safe (albeit pessimistic) because the 0-lag point is always
-        * before the current scheduling deadline.
-        *
-        * We can still have temporary overloads because we do not delay the
-        * change in bandwidth until that time; so admission control is
-        * not on the safe side. It does however guarantee tasks will never
-        * consume more than promised.
-        */
-}
-
-/*
  * sched_setparam() passes in -1 for its policy, to let the functions
  * it calls know not to change it.
  */
@@ -4101,59 +3960,6 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
                p->sched_class = &fair_sched_class;
 }
 
-static void
-__getparam_dl(struct task_struct *p, struct sched_attr *attr)
-{
-       struct sched_dl_entity *dl_se = &p->dl;
-
-       attr->sched_priority = p->rt_priority;
-       attr->sched_runtime = dl_se->dl_runtime;
-       attr->sched_deadline = dl_se->dl_deadline;
-       attr->sched_period = dl_se->dl_period;
-       attr->sched_flags = dl_se->flags;
-}
-
-/*
- * This function validates the new parameters of a -deadline task.
- * We ask for the deadline not being zero, and greater or equal
- * than the runtime, as well as the period of being zero or
- * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution of 1us (we
- * check sched_runtime only since it is always the smaller one) and
- * below 2^63 ns (we have to check both sched_deadline and
- * sched_period, as the latter can be zero).
- */
-static bool
-__checkparam_dl(const struct sched_attr *attr)
-{
-       /* deadline != 0 */
-       if (attr->sched_deadline == 0)
-               return false;
-
-       /*
-        * Since we truncate DL_SCALE bits, make sure we're at least
-        * that big.
-        */
-       if (attr->sched_runtime < (1ULL << DL_SCALE))
-               return false;
-
-       /*
-        * Since we use the MSB for wrap-around and sign issues, make
-        * sure it's not set (mind that period can be equal to zero).
-        */
-       if (attr->sched_deadline & (1ULL << 63) ||
-           attr->sched_period & (1ULL << 63))
-               return false;
-
-       /* runtime <= deadline <= period (if period != 0) */
-       if ((attr->sched_period != 0 &&
-            attr->sched_period < attr->sched_deadline) ||
-           attr->sched_deadline < attr->sched_runtime)
-               return false;
-
-       return true;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -4170,19 +3976,6 @@ static bool check_same_owner(struct task_struct *p)
        return match;
 }
 
-static bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
-{
-       struct sched_dl_entity *dl_se = &p->dl;
-
-       if (dl_se->dl_runtime != attr->sched_runtime ||
-               dl_se->dl_deadline != attr->sched_deadline ||
-               dl_se->dl_period != attr->sched_period ||
-               dl_se->flags != attr->sched_flags)
-               return true;
-
-       return false;
-}
-
 static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user, bool pi)
@@ -4362,7 +4155,7 @@ static int __sched_setscheduler(struct task_struct *p,
         * of a SCHED_DEADLINE task) we need to check if enough bandwidth
         * is available.
         */
-       if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
+       if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
                task_rq_unlock(rq, p, &rf);
                return -EBUSY;
        }
@@ -5468,23 +5261,12 @@ void init_idle(struct task_struct *idle, int cpu)
 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
                              const struct cpumask *trial)
 {
-       int ret = 1, trial_cpus;
-       struct dl_bw *cur_dl_b;
-       unsigned long flags;
+       int ret = 1;
 
        if (!cpumask_weight(cur))
                return ret;
 
-       rcu_read_lock_sched();
-       cur_dl_b = dl_bw_of(cpumask_any(cur));
-       trial_cpus = cpumask_weight(trial);
-
-       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
-       if (cur_dl_b->bw != -1 &&
-           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
-               ret = 0;
-       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
-       rcu_read_unlock_sched();
+       ret = dl_cpuset_cpumask_can_shrink(cur, trial);
 
        return ret;
 }
@@ -5509,34 +5291,8 @@ int task_can_attach(struct task_struct *p,
        }
 
        if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-                                             cs_cpus_allowed)) {
-               unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
-                                                       cs_cpus_allowed);
-               struct dl_bw *dl_b;
-               bool overflow;
-               int cpus;
-               unsigned long flags;
-
-               rcu_read_lock_sched();
-               dl_b = dl_bw_of(dest_cpu);
-               raw_spin_lock_irqsave(&dl_b->lock, flags);
-               cpus = dl_bw_cpus(dest_cpu);
-               overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
-               if (overflow)
-                       ret = -EBUSY;
-               else {
-                       /*
-                        * We reserve space for this task in the destination
-                        * root_domain, as we can't fail after this point.
-                        * We will free resources in the source root_domain
-                        * later on (see set_cpus_allowed_dl()).
-                        */
-                       __dl_add(dl_b, p->dl.dl_bw);
-               }
-               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-               rcu_read_unlock_sched();
-
-       }
+                                             cs_cpus_allowed))
+               ret = dl_task_can_attach(p, cs_cpus_allowed);
 
 out:
        return ret;
@@ -5804,23 +5560,8 @@ static void cpuset_cpu_active(void)
 
 static int cpuset_cpu_inactive(unsigned int cpu)
 {
-       unsigned long flags;
-       struct dl_bw *dl_b;
-       bool overflow;
-       int cpus;
-
        if (!cpuhp_tasks_frozen) {
-               rcu_read_lock_sched();
-               dl_b = dl_bw_of(cpu);
-
-               raw_spin_lock_irqsave(&dl_b->lock, flags);
-               cpus = dl_bw_cpus(cpu);
-               overflow = __dl_overflow(dl_b, cpus, 0, 0);
-               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-               rcu_read_unlock_sched();
-
-               if (overflow)
+               if (dl_cpu_busy(cpu))
                        return -EBUSY;
                cpuset_update_active_cpus();
        } else {
@@ -6740,70 +6481,6 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-static int sched_dl_global_validate(void)
-{
-       u64 runtime = global_rt_runtime();
-       u64 period = global_rt_period();
-       u64 new_bw = to_ratio(period, runtime);
-       struct dl_bw *dl_b;
-       int cpu, ret = 0;
-       unsigned long flags;
-
-       /*
-        * Here we want to check the bandwidth not being set to some
-        * value smaller than the currently allocated bandwidth in
-        * any of the root_domains.
-        *
-        * FIXME: Cycling on all the CPUs is overdoing, but simpler than
-        * cycling on root_domains... Discussion on different/better
-        * solutions is welcome!
-        */
-       for_each_possible_cpu(cpu) {
-               rcu_read_lock_sched();
-               dl_b = dl_bw_of(cpu);
-
-               raw_spin_lock_irqsave(&dl_b->lock, flags);
-               if (new_bw < dl_b->total_bw)
-                       ret = -EBUSY;
-               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-               rcu_read_unlock_sched();
-
-               if (ret)
-                       break;
-       }
-
-       return ret;
-}
-
-static void sched_dl_do_global(void)
-{
-       u64 new_bw = -1;
-       struct dl_bw *dl_b;
-       int cpu;
-       unsigned long flags;
-
-       def_dl_bandwidth.dl_period = global_rt_period();
-       def_dl_bandwidth.dl_runtime = global_rt_runtime();
-
-       if (global_rt_runtime() != RUNTIME_INF)
-               new_bw = to_ratio(global_rt_period(), global_rt_runtime());
-
-       /*
-        * FIXME: As above...
-        */
-       for_each_possible_cpu(cpu) {
-               rcu_read_lock_sched();
-               dl_b = dl_bw_of(cpu);
-
-               raw_spin_lock_irqsave(&dl_b->lock, flags);
-               dl_b->bw = new_bw;
-               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-               rcu_read_unlock_sched();
-       }
-}
-
 static int sched_rt_global_validate(void)
 {
        if (sysctl_sched_rt_period <= 0)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a2ce590156..e879feae5f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -17,6 +17,7 @@
 #include "sched.h"
 
 #include <linux/slab.h>
+#include <uapi/linux/sched/types.h>
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -1854,6 +1855,341 @@ const struct sched_class dl_sched_class = {
        .update_curr            = update_curr_dl,
 };
 
+#ifdef CONFIG_SMP
+struct dl_bw *dl_bw_of(int i)
+{
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
+       return &cpu_rq(i)->rd->dl_bw;
+}
+
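+/*
+ * Number of CPUs in this root domain's span that are currently active,
+ * i.e. the capacity available to deadline tasks here.
+ */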
+static inline int dl_bw_cpus(int i)
+{
+       struct root_domain *rd = cpu_rq(i)->rd;
+       int cpus = 0;
+
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
+       for_each_cpu_and(i, rd->span, cpu_active_mask)
+               cpus++;
+
+       return cpus;
+}
+#else
+struct dl_bw *dl_bw_of(int i)
+{
+       return &cpu_rq(i)->dl.dl_bw;
+}
+
+static inline int dl_bw_cpus(int i)
+{
+       return 1;
+}
+#endif
+
+int sched_dl_global_validate(void)
+{
+       u64 runtime = global_rt_runtime();
+       u64 period = global_rt_period();
+       u64 new_bw = to_ratio(period, runtime);
+       struct dl_bw *dl_b;
+       int cpu, ret = 0;
+       unsigned long flags;
+
+       /*
+        * Here we want to check that the bandwidth is not being set to
+        * some value smaller than the currently allocated bandwidth in
+        * any of the root_domains.
+        *
+        * FIXME: Cycling on all the CPUs is overkill, but simpler than
+        * cycling on root_domains... Discussion on different/better
+        * solutions is welcome!
+        */
+       for_each_possible_cpu(cpu) {
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               if (new_bw < dl_b->total_bw)
+                       ret = -EBUSY;
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+void sched_dl_do_global(void)
+{
+       u64 new_bw = -1;
+       struct dl_bw *dl_b;
+       int cpu;
+       unsigned long flags;
+
+       def_dl_bandwidth.dl_period = global_rt_period();
+       def_dl_bandwidth.dl_runtime = global_rt_runtime();
+
+       if (global_rt_runtime() != RUNTIME_INF)
+               new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+
+       /*
+        * FIXME: As above...
+        */
+       for_each_possible_cpu(cpu) {
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               dl_b->bw = new_bw;
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+       }
+}
+
+/*
+ * We must be sure that accepting a new task (or allowing changing the
+ * parameters of an existing one) is consistent with the bandwidth
+ * constraints. If so, this function also updates the currently allocated
+ * bandwidth to reflect the new situation.
+ *
+ * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
+ */
+int sched_dl_overflow(struct task_struct *p, int policy,
+                     const struct sched_attr *attr)
+{
+       struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+       u64 period = attr->sched_period ?: attr->sched_deadline;
+       u64 runtime = attr->sched_runtime;
+       u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
+       int cpus, err = -1;
+
+       /* !deadline task may carry old deadline bandwidth */
+       if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
+               return 0;
+
+       /*
+        * Whether a task enters, leaves, or stays -deadline but changes
+        * its parameters, we may need to update the total allocated
+        * bandwidth of the container accordingly.
+        */
+       raw_spin_lock(&dl_b->lock);
+       cpus = dl_bw_cpus(task_cpu(p));
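+       /*
+        * Three cases below: the task enters SCHED_DEADLINE (account
+        * new_bw), it stays SCHED_DEADLINE with changed parameters
+        * (swap its old bandwidth for new_bw), or it leaves
+        * SCHED_DEADLINE (release its old bandwidth).
+        */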
+       if (dl_policy(policy) && !task_has_dl_policy(p) &&
+           !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+               __dl_add(dl_b, new_bw);
+               err = 0;
+       } else if (dl_policy(policy) && task_has_dl_policy(p) &&
+                  !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+               __dl_clear(dl_b, p->dl.dl_bw);
+               __dl_add(dl_b, new_bw);
+               err = 0;
+       } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
+               __dl_clear(dl_b, p->dl.dl_bw);
+               err = 0;
+       }
+       raw_spin_unlock(&dl_b->lock);
+
+       return err;
+}
+
+/*
+ * This function initializes the sched_dl_entity of a task which is
+ * newly becoming SCHED_DEADLINE.
+ *
+ * Only the static values are considered here; the actual runtime and the
+ * absolute deadline will be properly calculated when the task is enqueued
+ * for the first time with its new policy.
+ */
+void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = attr->sched_runtime;
+       dl_se->dl_deadline = attr->sched_deadline;
+       dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
+       dl_se->flags = attr->sched_flags;
+       dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+
+       /*
+        * Changing the parameters of a task is 'tricky' and we're not doing
+        * the correct thing -- also see task_dead_dl() and switched_from_dl().
+        *
+        * What we SHOULD do is delay the bandwidth release until the 0-lag
+        * point. This would include retaining the task_struct until that time
+        * and change sched_dl_overflow() to not immediately decrement the
+        * current amount.
+        *
+        * Instead we retain the current runtime/deadline and let the new
+        * parameters take effect after the current reservation period lapses.
+        * This is safe (albeit pessimistic) because the 0-lag point is always
+        * before the current scheduling deadline.
+        *
+        * We can still have temporary overloads because we do not delay the
+        * change in bandwidth until that time; so admission control is
+        * not on the safe side. It does however guarantee tasks will never
+        * consume more than promised.
+        */
+}
+
+void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       attr->sched_priority = p->rt_priority;
+       attr->sched_runtime = dl_se->dl_runtime;
+       attr->sched_deadline = dl_se->dl_deadline;
+       attr->sched_period = dl_se->dl_period;
+       attr->sched_flags = dl_se->flags;
+}
+
+/*
+ * This function validates the new parameters of a -deadline task.
+ * We ask that the deadline be non-zero and greater than or equal
+ * to the runtime, and that the period be either zero or greater
+ * than or equal to the deadline. Furthermore, we have to be sure that
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
+ */
+bool __checkparam_dl(const struct sched_attr *attr)
+{
+       /* deadline != 0 */
+       if (attr->sched_deadline == 0)
+               return false;
+
+       /*
+        * Since we truncate DL_SCALE bits, make sure we're at least
+        * that big.
+        */
+       if (attr->sched_runtime < (1ULL << DL_SCALE))
+               return false;
+
+       /*
+        * Since we use the MSB for wrap-around and sign issues, make
+        * sure it's not set (mind that period can be equal to zero).
+        */
+       if (attr->sched_deadline & (1ULL << 63) ||
+           attr->sched_period & (1ULL << 63))
+               return false;
+
+       /* runtime <= deadline <= period (if period != 0) */
+       if ((attr->sched_period != 0 &&
+            attr->sched_period < attr->sched_deadline) ||
+           attr->sched_deadline < attr->sched_runtime)
+               return false;
+
+       return true;
+}
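+
+/*
+ * For instance (illustrative values only): runtime=2ms, deadline=5ms,
+ * period=0 passes (a zero period later defaults to the deadline in
+ * __setparam_dl()), while runtime=500ns fails the DL_SCALE (~1us)
+ * resolution check above.
+ */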
+
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = 0;
+       dl_se->dl_deadline = 0;
+       dl_se->dl_period = 0;
+       dl_se->flags = 0;
+       dl_se->dl_bw = 0;
+
+       dl_se->dl_throttled = 0;
+       dl_se->dl_yielded = 0;
+}
+
+bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       if (dl_se->dl_runtime != attr->sched_runtime ||
+           dl_se->dl_deadline != attr->sched_deadline ||
+           dl_se->dl_period != attr->sched_period ||
+           dl_se->flags != attr->sched_flags)
+               return true;
+
+       return false;
+}
+
+#ifdef CONFIG_SMP
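+/*
+ * Check whether p's deadline bandwidth fits in the root domain it
+ * would end up in when confined to cs_cpus_allowed and, if so,
+ * reserve that bandwidth there.
+ */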
+int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
+{
+       unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+                                               cs_cpus_allowed);
+       struct dl_bw *dl_b;
+       bool overflow;
+       int cpus, ret;
+       unsigned long flags;
+
+       rcu_read_lock_sched();
+       dl_b = dl_bw_of(dest_cpu);
+       raw_spin_lock_irqsave(&dl_b->lock, flags);
+       cpus = dl_bw_cpus(dest_cpu);
+       overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+       if (overflow)
+               ret = -EBUSY;
+       else {
+               /*
+                * We reserve space for this task in the destination
+                * root_domain, as we can't fail after this point.
+                * We will free resources in the source root_domain
+                * later on (see set_cpus_allowed_dl()).
+                */
+               __dl_add(dl_b, p->dl.dl_bw);
+               ret = 0;
+       }
+       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+       rcu_read_unlock_sched();
+       return ret;
+}
+
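+/*
+ * Check that shrinking a cpuset's CPUs from 'cur' to 'trial' still
+ * leaves enough capacity for the deadline bandwidth allocated in
+ * cur's root domain.
+ */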
+int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                                const struct cpumask *trial)
+{
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
+       unsigned long flags;
+
+       rcu_read_lock_sched();
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
+
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+       rcu_read_unlock_sched();
+       return ret;
+}
+
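+/*
+ * Used on CPU hotplug: returns true if, with this CPU gone, the
+ * deadline bandwidth already admitted in its root domain would no
+ * longer fit on the remaining active CPUs.
+ */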
+bool dl_cpu_busy(unsigned int cpu)
+{
+       unsigned long flags;
+       struct dl_bw *dl_b;
+       bool overflow;
+       int cpus;
+
+       rcu_read_lock_sched();
+       dl_b = dl_bw_of(cpu);
+       raw_spin_lock_irqsave(&dl_b->lock, flags);
+       cpus = dl_bw_cpus(cpu);
+       overflow = __dl_overflow(dl_b, cpus, 0, 0);
+       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+       rcu_read_unlock_sched();
+       return overflow;
+}
+#endif
+
 #ifdef CONFIG_SCHED_DEBUG
 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 053f60afb7..4a845c19b8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -245,6 +245,20 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 }
 
 extern void init_dl_bw(struct dl_bw *dl_b);
+extern int sched_dl_global_validate(void);
+extern void sched_dl_do_global(void);
+extern int sched_dl_overflow(struct task_struct *p, int policy,
+                            const struct sched_attr *attr);
+extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
+extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
+extern bool __checkparam_dl(const struct sched_attr *attr);
+extern void __dl_clear_params(struct task_struct *p);
+extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
+extern int dl_task_can_attach(struct task_struct *p,
+                             const struct cpumask *cs_cpus_allowed);
+extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                                       const struct cpumask *trial);
+extern bool dl_cpu_busy(unsigned int cpu);
 
 #ifdef CONFIG_CGROUP_SCHED
 
-- 
2.9.4
