[PATCH] sched: disable interrupts for locking in load_balance()
author Christoph Lameter <clameter@sgi.com>
Sun, 10 Dec 2006 10:20:21 +0000 (02:20 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
Sun, 10 Dec 2006 17:55:42 +0000 (09:55 -0800)
Interrupts must be disabled around the runqueue locks if we want to run
load_balance() with interrupts enabled.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
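
For context: the runqueue lock is also taken from timer-interrupt context
(scheduler_tick()), so acquiring it in process context with a plain
spin_lock() while interrupts are enabled can self-deadlock if the tick
fires on the same CPU inside the critical section. The patch therefore
switches to the interrupt-safe locking variants. Below is a minimal sketch
of that pattern, using a hypothetical lock and handler rather than the
scheduler's actual data structures:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration only */

/* Hard-IRQ context, analogous to scheduler_tick() taking rq->lock. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	spin_lock(&example_lock);	/* interrupts are already off in hard-IRQ context */
	/* ... touch data shared with process context ... */
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}

/*
 * Process context: disable local interrupts for the critical section;
 * otherwise the handler above could interrupt us while we hold the
 * lock and spin on it forever on this CPU.
 */
static void example_process_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch the shared data ... */
	spin_unlock_irqrestore(&example_lock, flags);
}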
kernel/sched.c

index b5b350135002c193052ed3d57834b08a3bda2dfc..d327511d268e062458372ae543b45a7fd9290f2b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
                        struct sched_domain *sd, enum idle_type idle)
@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        unsigned long imbalance;
        struct rq *busiest;
        cpumask_t cpus = CPU_MASK_ALL;
+       unsigned long flags;
 
        /*
         * When power savings policy is enabled for the parent domain, idle
@@ -2596,11 +2595,13 @@ redo:
                 * still unbalanced. nr_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
+               local_irq_save(flags);
                double_rq_lock(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                      minus_1_or_zero(busiest->nr_running),
                                      imbalance, sd, idle, &all_pinned);
                double_rq_unlock(this_rq, busiest);
+               local_irq_restore(flags);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(all_pinned)) {
@@ -2617,13 +2618,13 @@ redo:
 
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                       spin_lock(&busiest->lock);
+                       spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
                         * task on busiest cpu can't be moved to this_cpu
                         */
                        if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-                               spin_unlock(&busiest->lock);
+                               spin_unlock_irqrestore(&busiest->lock, flags);
                                all_pinned = 1;
                                goto out_one_pinned;
                        }
@@ -2633,7 +2634,7 @@ redo:
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
-                       spin_unlock(&busiest->lock);
+                       spin_unlock_irqrestore(&busiest->lock, flags);
                        if (active_balance)
                                wake_up_process(busiest->migration_thread);
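
A note on the two shapes used in the hunks above: the single busiest->lock
acquisitions move to spin_lock_irqsave()/spin_unlock_irqrestore(), while the
double_rq_lock()/double_rq_unlock() pair is instead bracketed by
local_irq_save()/local_irq_restore(), because double_rq_lock() takes both
runqueue locks with plain spin_lock() (ordering them to avoid ABBA deadlock)
and a single interrupt-disable covers both acquisitions. A sketch of the
equivalence, with hypothetical locks a and b:

	unsigned long flags;

	/* As in the patch: disable interrupts once, then take both locks. */
	local_irq_save(flags);
	spin_lock(&a);
	spin_lock(&b);
	/* ... critical section ... */
	spin_unlock(&b);
	spin_unlock(&a);
	local_irq_restore(flags);

	/* Equivalent: irqsave on the first lock, plain lock on the second. */
	spin_lock_irqsave(&a, flags);
	spin_lock(&b);
	/* ... critical section ... */
	spin_unlock(&b);
	spin_unlock_irqrestore(&a, flags);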