Gregory Haskins' RT balancing work broke the sched-domain handling in the
wakeup path.  Fix it by making try_to_wake_up() walk the waking CPU's domains
to find the one that spans the task's CPU (accounting ttwu_wake_remote on that
domain rather than on rq->sd), and by passing the domain it found down to the
->select_task_rq() class hook, so that select_task_rq_fair() only repeats the
walk when CONFIG_SCHEDSTATS is not set.
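
As a concrete illustration of the interface change, here is a minimal sketch
(not part of the patch; the function name and body are made up for
illustration) of what an implementation of the reworked ->select_task_rq()
hook looks like.  The real implementations are select_task_rq_fair(),
select_task_rq_rt() and select_task_rq_idle() in the diff below.  The sd
argument is the sched domain of the waking CPU that spans task_cpu(p), as
found by try_to_wake_up(); it may be NULL (e.g. without CONFIG_SCHEDSTATS, or
for a local wakeup), so implementations must check it before use.

/*
 * Hypothetical example only -- not part of this patch.  The waking
 * CPU's sched domain spanning the task's CPU is now handed in by
 * try_to_wake_up() instead of every class re-walking the domains.
 */
static int select_task_rq_example(struct task_struct *p,
				  struct sched_domain *sd, int sync)
{
	int this_cpu = smp_processor_id();

	/* sd may be NULL (e.g. !CONFIG_SCHEDSTATS or a local wakeup). */
	if (sd && (sd->flags & SD_WAKE_AFFINE) &&
	    cpu_isset(this_cpu, p->cpus_allowed))
		return this_cpu;	/* affine wakeup onto the waking CPU */

	return task_cpu(p);		/* otherwise leave the task where it is */
}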

Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>

---
 include/linux/sched.h   |    3 ++-
 kernel/sched.c          |   17 ++++++++++++++---
 kernel/sched_fair.c     |   19 ++++++++++++++-----
 kernel/sched_idletask.c |    3 ++-
 kernel/sched_rt.c       |    3 ++-
 5 files changed, 34 insertions(+), 11 deletions(-)

Index: linux-compile.git/kernel/sched.c
===================================================================
--- linux-compile.git.orig/kernel/sched.c       2007-11-17 00:15:57.000000000 -0500
+++ linux-compile.git/kernel/sched.c    2007-11-17 00:15:57.000000000 -0500
@@ -1453,6 +1453,7 @@ static int try_to_wake_up(struct task_st
        unsigned long flags;
        long old_state;
        struct rq *rq;
+       struct sched_domain *this_sd = NULL;
 #ifdef CONFIG_SMP
        int new_cpu;
 #endif
@@ -1476,10 +1477,20 @@ static int try_to_wake_up(struct task_st
        schedstat_inc(rq, ttwu_count);
        if (cpu == this_cpu)
                schedstat_inc(rq, ttwu_local);
-       else
-               schedstat_inc(rq->sd, ttwu_wake_remote);
+       else {
+#ifdef CONFIG_SCHEDSTATS
+               struct sched_domain *sd;
+               for_each_domain(this_cpu, sd) {
+                       if (cpu_isset(cpu, sd->span)) {
+                               schedstat_inc(sd, ttwu_wake_remote);
+                               this_sd = sd;
+                               break;
+                       }
+               }
+#endif /* CONFIG_SCHEDSTATS */
+       }
 
-       new_cpu = p->sched_class->select_task_rq(p, sync);
+       new_cpu = p->sched_class->select_task_rq(p, this_sd, sync);
 
        if (new_cpu != cpu) {
                set_task_cpu(p, new_cpu);
Index: linux-compile.git/include/linux/sched.h
===================================================================
--- linux-compile.git.orig/include/linux/sched.h        2007-11-17 00:15:57.000000000 -0500
+++ linux-compile.git/include/linux/sched.h     2007-11-17 00:15:57.000000000 -0500
@@ -823,7 +823,8 @@ struct sched_class {
        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
        void (*yield_task) (struct rq *rq);
-       int  (*select_task_rq)(struct task_struct *p, int sync);
+       int  (*select_task_rq)(struct task_struct *p,
+                              struct sched_domain *sd, int sync);
 
        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
Index: linux-compile.git/kernel/sched_fair.c
===================================================================
--- linux-compile.git.orig/kernel/sched_fair.c  2007-11-17 00:15:57.000000000 -0500
+++ linux-compile.git/kernel/sched_fair.c       2007-11-17 00:43:44.000000000 -0500
@@ -611,11 +611,12 @@ static inline int wake_idle(int cpu, str
 #endif
 
 #ifdef CONFIG_SMP
-static int select_task_rq_fair(struct task_struct *p, int sync)
+static int select_task_rq_fair(struct task_struct *p,
+                              struct sched_domain *this_sd, int sync)
 {
        int cpu, this_cpu;
        struct rq *rq;
-       struct sched_domain *sd, *this_sd = NULL;
+       struct sched_domain *sd;
        int new_cpu;
 
        cpu      = task_cpu(p);
@@ -623,15 +624,23 @@ static int select_task_rq_fair(struct ta
        this_cpu = smp_processor_id();
        new_cpu  = cpu;
 
+       if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+               goto out_set_cpu;
+
+#ifndef CONFIG_SCHEDSTATS
+       /*
+        * If SCHEDSTATS is configured, then this_sd would
+        * have already been determined.
+        */
        for_each_domain(this_cpu, sd) {
                if (cpu_isset(cpu, sd->span)) {
                        this_sd = sd;
                        break;
                }
        }
-
-       if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
-               goto out_set_cpu;
+#else
+       (void)sd; /* unused */
+#endif /* CONFIG_SCHEDSTATS */
 
        /*
         * Check for affine wakeup and passive balancing possibilities.
Index: linux-compile.git/kernel/sched_idletask.c
===================================================================
--- linux-compile.git.orig/kernel/sched_idletask.c      2007-11-17 00:15:57.000000000 -0500
+++ linux-compile.git/kernel/sched_idletask.c   2007-11-17 00:15:57.000000000 -0500
@@ -6,7 +6,8 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sync)
+static int select_task_rq_idle(struct task_struct *p,
+                              struct sched_domain *sd, int sync)
 {
        return task_cpu(p); /* IDLE tasks as never migrated */
 }
Index: linux-compile.git/kernel/sched_rt.c
===================================================================
--- linux-compile.git.orig/kernel/sched_rt.c    2007-11-17 00:15:57.000000000 -0500
+++ linux-compile.git/kernel/sched_rt.c 2007-11-17 00:44:31.000000000 -0500
@@ -46,7 +46,8 @@ static void update_rt_migration(struct r
 
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sync)
+static int select_task_rq_rt(struct task_struct *p,
+                            struct sched_domain *sd, int sync)
 {
        struct rq *rq = task_rq(p);
 
