sched: fix load scaling in group balancing
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 27 Jun 2008 11:41:29 +0000 (13:41 +0200)
committer Ingo Molnar <mingo@elte.hu>
Fri, 27 Jun 2008 12:31:41 +0000 (14:31 +0200)
Doing the load balance will change cfs_rq->load.weight (that's the whole
point), but since that weight is part of the scale factor, we would scale
back by a different amount.

The weight getting smaller results in an inflated moved_load, which causes
balancing to stop too soon.
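The patch below caches busiest_cfs_rq->h_load and busiest_cfs_rq->load.weight
before any tasks are moved, so the scale-up and the scale-back use the same
factor. As a minimal user-space sketch of the arithmetic (the numbers are
hypothetical, chosen only to make the inflation visible; the two divisions
mirror the ones in load_balance_fair()):

#include <stdio.h>

int main(void)
{
	/* hypothetical values, not taken from any real run */
	long rem_load_move = 1024;	/* load we still want to move      */
	long h_load        = 2048;	/* hierarchical load of the group  */
	long weight_before = 4096;	/* load.weight before moving tasks */
	long weight_after  = 1024;	/* load.weight after moving tasks  */

	/* scale the request into group-local units, as the code does */
	long rem_load = rem_load_move * weight_before / (h_load + 1);

	/* assume __load_balance_fair() managed to move all of it */
	long moved = rem_load;

	/* buggy: scale back with the now-smaller weight */
	long buggy = moved * h_load / (weight_after + 1);
	/* fixed: scale back with the cached weight used above */
	long fixed = moved * h_load / (weight_before + 1);

	/* prints: rem_load=2046 buggy=4088 fixed=1022 */
	printf("rem_load=%ld buggy=%ld fixed=%ld\n", rem_load, buggy, fixed);
	return 0;
}

Subtracting the inflated value drives rem_load_move negative after a single
group, which is exactly the "stop balancing too soon" behaviour described
above; the cached weight keeps moved_load an honest estimate of how much of
rem_load_move was actually consumed.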

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index 865cb53a7ccf1776c0abcdc86438c2a84a1ac930..734e4c556fcba1d0309531f48792d8ea4711cee7 100644
@@ -1444,6 +1444,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
        list_for_each_entry(tg, &task_groups, list) {
                struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+               unsigned long busiest_h_load = busiest_cfs_rq->h_load;
+               unsigned long busiest_weight = busiest_cfs_rq->load.weight;
                long rem_load, moved_load;
 
                /*
@@ -1452,8 +1454,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                if (!busiest_cfs_rq->task_weight)
                        continue;
 
-               rem_load = rem_load_move * busiest_cfs_rq->load.weight;
-               rem_load /= busiest_cfs_rq->h_load + 1;
+               rem_load = rem_load_move * busiest_weight;
+               rem_load /= busiest_h_load + 1;
 
                moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
                                rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1462,8 +1464,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                if (!moved_load)
                        continue;
 
-               moved_load *= busiest_cfs_rq->h_load;
-               moved_load /= busiest_cfs_rq->load.weight + 1;
+               moved_load *= busiest_h_load;
+               moved_load /= busiest_weight + 1;
 
                rem_load_move -= moved_load;
                if (rem_load_move < 0)