
Similarly to the earlier change in load_balance, only lock the runqueue
in load_balance_newidle if the busiest queue found has nr_running > 1.
This reduces the frequency of expensive remote runqueue lock acquisitions
in the schedule() path on some workloads.

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c       2005-08-02 21:35:36.000000000 +1000
+++ linux-2.6/kernel/sched.c    2005-08-02 21:56:40.000000000 +1000
@@ -2080,8 +2080,7 @@ static int load_balance(int this_cpu, ru
                 */
                double_lock_balance(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                                               imbalance, sd, idle,
-                                               &all_pinned);
+                                       imbalance, sd, idle, &all_pinned);
                spin_unlock(&busiest->lock);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
@@ -2176,18 +2175,22 @@ static int load_balance_newidle(int this
 
        BUG_ON(busiest == this_rq);
 
-       /* Attempt to move tasks */
-       double_lock_balance(this_rq, busiest);
-
        schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-       nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+       nr_moved = 0;
+       if (busiest->nr_running > 1) {
+               /* Attempt to move tasks */
+               double_lock_balance(this_rq, busiest);
+               nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, NEWLY_IDLE, NULL);
+               spin_unlock(&busiest->lock);
+       }
+
        if (!nr_moved)
                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
        else
                sd->nr_balance_failed = 0;
 
-       spin_unlock(&busiest->lock);
        return nr_moved;
 
 out_balanced:

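The check-before-lock pattern used here generalizes beyond the scheduler.
Below is a minimal userspace sketch of the same idea using pthreads; the
names (toy_queue, try_pull) are illustrative only and are not part of the
patch:

	#include <pthread.h>
	#include <stdio.h>

	/* Toy analogue of a runqueue: a counter guarded by a lock. */
	struct toy_queue {
		pthread_mutex_t lock;
		int nr_running;
	};

	/*
	 * Optimistic check-before-lock: read nr_running without the
	 * lock first, and only pay for the (potentially contended)
	 * lock when there is plausibly work to pull. The unlocked
	 * read may race, which is acceptable because it is only a
	 * heuristic; the decision is re-made under the lock.
	 */
	static int try_pull(struct toy_queue *busiest)
	{
		int moved = 0;

		if (busiest->nr_running > 1) {	/* unlocked, racy read */
			pthread_mutex_lock(&busiest->lock);
			if (busiest->nr_running > 1) {	/* recheck under lock */
				busiest->nr_running--;
				moved = 1;
			}
			pthread_mutex_unlock(&busiest->lock);
		}
		return moved;
	}

	int main(void)
	{
		struct toy_queue q = { PTHREAD_MUTEX_INITIALIZER, 3 };

		printf("moved: %d\n", try_pull(&q));
		return 0;
	}

As in the patch, the worst case for a stale unlocked read is a missed or
wasted balancing attempt, never a correctness problem, which is what makes
skipping the lock safe.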