pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
sched: kill task_group balancing
author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Fri, 27 Jun 2008 11:41:20 +0000 (13:41 +0200)
committer: Ingo Molnar <mingo@elte.hu>
Fri, 27 Jun 2008 12:31:33 +0000 (14:31 +0200)
The idea was to balance groups until we've reached the global goal, however
Vatsa rightly pointed out that we might never reach that goal this way -
hence take out this logic.

[ the initial rationale for this 'feature' was to promote max concurrency
  within a group - it does not however affect fairness ]

Reported-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index 40cf24ab4de8c9f2eada01a22592c707b3e6fac0..b10c0d61a2a95faad30b4009e64908882a248d35 100644 (file)
@@ -1422,9 +1422,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
        rcu_read_lock();
        list_for_each_entry(tg, &task_groups, list) {
-               long imbalance;
-               unsigned long this_weight, busiest_weight;
-               long rem_load, max_load, moved_load;
+               long rem_load, moved_load;
 
                /*
                 * empty group
@@ -1435,17 +1433,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
                rem_load /= aggregate(tg, this_cpu)->load + 1;
 
-               this_weight = tg->cfs_rq[this_cpu]->task_weight;
-               busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
-
-               imbalance = (busiest_weight - this_weight) / 2;
-
-               if (imbalance < 0)
-                       imbalance = busiest_weight;
-
-               max_load = max(rem_load, imbalance);
                moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
-                               max_load, sd, idle, all_pinned, this_best_prio,
+                               rem_load, sd, idle, all_pinned, this_best_prio,
                                tg->cfs_rq[busiest_cpu]);
 
                if (!moved_load)