pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[PATCH] move_task_off_dead_cpu() should be called with disabled ints
author     Kirill Korotaev <dev@openvz.org>
           Sun, 10 Dec 2006 10:20:11 +0000 (02:20 -0800)
committer  Linus Torvalds <torvalds@woody.osdl.org>
           Sun, 10 Dec 2006 17:55:42 +0000 (09:55 -0800)
move_task_off_dead_cpu() requires interrupts to be disabled, while
migrate_dead() calls it with interrupts enabled.  Fixed migrate_dead()
to drop rq->lock with plain spin_unlock() so that interrupts stay
disabled across the call, added appropriate comments to both functions,
and added BUG_ON(!irqs_disabled()) to double_rq_lock() and
double_lock_balance(), which are the usual sources of such bugs.

Signed-off-by: Kirill Korotaev <dev@openvz.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
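
To make the invariant behind the new BUG_ON() concrete, here is a minimal
user-space sketch (not kernel code): a per-thread flag stands in for the
CPU interrupt-enable state and assert() stands in for BUG_ON(), so taking
both runqueue locks without first "disabling interrupts" trips the check
immediately.  All names below (fake_irqs_disabled, fake_local_irq_disable(),
toy_double_rq_lock()) are invented for this illustration and are not kernel
interfaces.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static __thread int fake_irqs_disabled;         /* stand-in for the CPU irq flag */

static pthread_mutex_t rq1_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq2_lock = PTHREAD_MUTEX_INITIALIZER;

static void fake_local_irq_disable(void) { fake_irqs_disabled = 1; }
static void fake_local_irq_enable(void)  { fake_irqs_disabled = 0; }

/* analogue of double_rq_lock(): refuse to run with "interrupts" enabled */
static void toy_double_rq_lock(void)
{
        assert(fake_irqs_disabled);             /* BUG_ON(!irqs_disabled()) */
        pthread_mutex_lock(&rq1_lock);
        pthread_mutex_lock(&rq2_lock);
}

static void toy_double_rq_unlock(void)
{
        pthread_mutex_unlock(&rq2_lock);
        pthread_mutex_unlock(&rq1_lock);
}

int main(void)
{
        fake_local_irq_disable();               /* callers must do this first */
        toy_double_rq_lock();
        printf("both runqueue locks taken with interrupts off\n");
        toy_double_rq_unlock();
        fake_local_irq_enable();
        return 0;
}

Build with something like gcc -pthread; drop the fake_local_irq_disable()
call in main() and the assertion fires, which is the class of misuse the
patch now catches at the double_rq_lock() and double_lock_balance() entry
points.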
kernel/sched.c

index a08387b5f7fa276ac97a0b0135b70646d0810d71..f04add905bdfa349724dd29ebc2db056800dc240 100644
@@ -1952,6 +1952,7 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
 {
+       BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
                spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
@@ -1991,6 +1992,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
+       if (unlikely(!irqs_disabled())) {
+               /* printk() doesn't work well under rq->lock */
+               spin_unlock(&this_rq->lock);
+               BUG_ON(1);
+       }
        if (unlikely(!spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
                        spin_unlock(&this_rq->lock);
@@ -5067,7 +5073,10 @@ wait_to_die:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* Figure out where task on dead CPU should go, use force if neccessary. */
+/*
+ * Figure out where task on dead CPU should go, use force if necessary.
+ * NOTE: interrupts should be disabled by the caller
+ */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
        unsigned long flags;
@@ -5187,6 +5196,7 @@ void idle_task_exit(void)
        mmdrop(mm);
 }
 
+/* called under rq->lock with disabled interrupts */
 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
        struct rq *rq = cpu_rq(dead_cpu);
@@ -5203,10 +5213,11 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
         * Drop lock around migration; if someone else moves it,
         * that's OK.  No task can be added to this CPU, so iteration is
         * fine.
+        * NOTE: interrupts should be left disabled  --dev@
         */
-       spin_unlock_irq(&rq->lock);
+       spin_unlock(&rq->lock);
        move_task_off_dead_cpu(dead_cpu, p);
-       spin_lock_irq(&rq->lock);
+       spin_lock(&rq->lock);
 
        put_task_struct(p);
 }
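
The migrate_dead() hunk above is the substance of the fix: the function is
entered with rq->lock held and interrupts disabled, so it must drop the lock
with plain spin_unlock() rather than spin_unlock_irq(), otherwise interrupts
would be re-enabled right before move_task_off_dead_cpu(), which needs them
off.  A hedged user-space analogy of that calling convention (all names are
hypothetical stand-ins, not kernel interfaces):

#include <assert.h>
#include <pthread.h>

static __thread int fake_irqs_disabled;         /* stand-in for the CPU irq flag */
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

/* analogue of move_task_off_dead_cpu(): relies on "interrupts" being off */
static void toy_move_task_off_dead_cpu(void)
{
        assert(fake_irqs_disabled);
        /* ... pick a new CPU for the task and requeue it ... */
}

/* analogue of migrate_dead(): called with rq_lock held and "interrupts" off */
static void toy_migrate_dead(void)
{
        /* drop the lock around the migration, but leave "interrupts" alone */
        pthread_mutex_unlock(&rq_lock);
        toy_move_task_off_dead_cpu();
        pthread_mutex_lock(&rq_lock);
}

int main(void)
{
        fake_irqs_disabled = 1;                 /* caller disables "interrupts" */
        pthread_mutex_lock(&rq_lock);           /* ... and takes the runqueue lock */
        toy_migrate_dead();
        pthread_mutex_unlock(&rq_lock);
        fake_irqs_disabled = 0;
        return 0;
}

If toy_migrate_dead() also cleared the flag when unlocking, as the old
spin_unlock_irq() effectively did, the assert() in the helper would fire,
which mirrors the original bug.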