[PATCH] cond_resched() fix
Author:     Andrew Morton <akpm@osdl.org>
AuthorDate: Fri, 30 Jun 2006 08:56:00 +0000 (01:56 -0700)
Commit:     Linus Torvalds <torvalds@g5.osdl.org>
CommitDate: Fri, 30 Jun 2006 18:25:38 +0000 (11:25 -0700)
Fix a bug identified by Zou Nan hai <nanhai.zou@intel.com>:

If the system is in state SYSTEM_BOOTING, and need_resched() is true,
cond_resched() returns true even though it didn't reschedule.  Consequently
need_resched() remains true and JBD locks up.
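
For reference, a condensed sketch of the pre-patch control flow (taken from
the old kernel/sched.c as it appears in the diff below; the __might_sleep()
debug hook is omitted, and the PREEMPT_ACTIVE teardown is filled in from the
surrounding kernel source):

	/*
	 * Pre-patch (simplified).  The early returns in __cond_resched()
	 * mean cond_resched() can report success without ever having
	 * called schedule().
	 */
	static inline void __cond_resched(void)
	{
		if (unlikely(preempt_count()))
			return;
		if (unlikely(system_state != SYSTEM_RUNNING))
			return;		/* SYSTEM_BOOTING: schedule() never runs */
		do {
			add_preempt_count(PREEMPT_ACTIVE);
			schedule();
			sub_preempt_count(PREEMPT_ACTIVE);
		} while (need_resched());
	}

	int __sched cond_resched(void)
	{
		if (need_resched()) {
			__cond_resched();
			return 1;	/* claims progress even if we bailed out */
		}
		return 0;
	}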

Fix that by teaching cond_resched() to only return true if it really did call
schedule().
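
The post-patch shape, condensed from the diff below: the preempt_count() and
system_state checks move out of __cond_resched() into a new __resched_legal()
helper, which now also gates the need_resched() test, so a return value of 1
implies schedule() actually ran.

	static inline int __resched_legal(void)
	{
		if (unlikely(preempt_count()))
			return 0;
		if (unlikely(system_state != SYSTEM_RUNNING))
			return 0;
		return 1;
	}

	int __sched cond_resched(void)
	{
		/* __cond_resched() now unconditionally calls schedule() */
		if (need_resched() && __resched_legal()) {
			__cond_resched();
			return 1;
		}
		return 0;
	}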

cond_resched_lock() and cond_resched_softirq() have a problem too.  If we're
in SYSTEM_BOOTING state and need_resched() is true, these functions will drop
the lock and will then try to call schedule(), but the SYSTEM_BOOTING state
will prevent schedule() from being called.  So on return, need_resched() will
still be true, but cond_resched_lock() has to return 1 to tell the caller that
the lock was dropped.  The caller will probably lock up.
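
To make the failure mode concrete, here is a purely illustrative caller (the
lock and helper names are hypothetical, not taken from JBD).  Pre-patch, in
SYSTEM_BOOTING state, cond_resched_lock() drops and retakes the lock but never
calls schedule(); need_resched() stays true, the function returns 1 on every
call, and the restart loop below spins forever:

	/*
	 * Illustrative only: a scan that must restart whenever the lock
	 * was dropped, because the data it walks may have changed while
	 * the lock was not held.
	 */
	spin_lock(&some_lock);			/* hypothetical lock */
restart:
	if (cond_resched_lock(&some_lock))
		goto restart;			/* lock was dropped: rescan */
	while (more_work_pending())		/* hypothetical predicate */
		process_one_item();		/* hypothetical work */
	spin_unlock(&some_lock);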

Bottom line: if these functions dropped the lock, they _must_ call schedule()
to clear need_resched().  Make it so.

Also, uninline __cond_resched().  It's largish, and it's slowpath code.

Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
kernel/sched.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 2629c1711fd62be84574153e0ae62077895f3b36..d5e37072ea54cb21af2b86925daf07677c27e920 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4386,7 +4386,16 @@ asmlinkage long sys_sched_yield(void)
        return 0;
 }
 
-static inline void __cond_resched(void)
+static inline int __resched_legal(void)
+{
+       if (unlikely(preempt_count()))
+               return 0;
+       if (unlikely(system_state != SYSTEM_RUNNING))
+               return 0;
+       return 1;
+}
+
+static void __cond_resched(void)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
        __might_sleep(__FILE__, __LINE__);
@@ -4396,10 +4405,6 @@ static inline void __cond_resched(void)
         * PREEMPT_ACTIVE, which could trigger a second
         * cond_resched() call.
         */
-       if (unlikely(preempt_count()))
-               return;
-       if (unlikely(system_state != SYSTEM_RUNNING))
-               return;
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                schedule();
@@ -4409,13 +4414,12 @@ static inline void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-       if (need_resched()) {
+       if (need_resched() && __resched_legal()) {
                __cond_resched();
                return 1;
        }
        return 0;
 }
-
 EXPORT_SYMBOL(cond_resched);
 
 /*
@@ -4436,7 +4440,7 @@ int cond_resched_lock(spinlock_t *lock)
                ret = 1;
                spin_lock(lock);
        }
-       if (need_resched()) {
+       if (need_resched() && __resched_legal()) {
                _raw_spin_unlock(lock);
                preempt_enable_no_resched();
                __cond_resched();
@@ -4445,14 +4449,13 @@ int cond_resched_lock(spinlock_t *lock)
        }
        return ret;
 }
-
 EXPORT_SYMBOL(cond_resched_lock);
 
 int __sched cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (need_resched()) {
+       if (need_resched() && __resched_legal()) {
                __local_bh_enable();
                __cond_resched();
                local_bh_disable();
@@ -4460,10 +4463,8 @@ int __sched cond_resched_softirq(void)
        }
        return 0;
 }
-
 EXPORT_SYMBOL(cond_resched_softirq);
 
-
 /**
  * yield - yield the current processor to other threads.
  *