mutex: preemption fixes
author    Peter Zijlstra <a.p.zijlstra@chello.nl>
          Wed, 14 Jan 2009 14:36:26 +0000 (15:36 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 14 Jan 2009 17:09:00 +0000 (18:09 +0100)
The problem is that dropping the spinlock right before schedule() is a voluntary
preemption point and can cause a reschedule, right after which we schedule again.

Fix this inefficiency by keeping preemption disabled until we actually schedule;
do this by explicitly disabling preemption and providing a schedule() variant
that assumes preemption is already disabled.
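
To make the inefficiency concrete, here is a small user-space model of the two
paths. This is an illustrative sketch, not kernel code: the helper names mirror
the patch, but the bodies only count scheduling passes so the difference is
visible.

/*
 * Illustrative user-space model of the old and new sleep paths; the names
 * mirror the patch, the bodies are stand-ins that count scheduling passes.
 */
#include <stdbool.h>
#include <stdio.h>

static int preempt_count;
static bool need_resched = true;	/* pretend another task is runnable */
static int sched_passes;

static void do_switch(void) { sched_passes++; need_resched = false; }

static void preempt_disable(void) { preempt_count++; }

/* Re-enabling preemption is itself a preemption point when need_resched is set. */
static void preempt_enable(void)
{
	if (--preempt_count == 0 && need_resched)
		do_switch();
}

/* spin_unlock re-enables preemption, so it inherits that preemption point. */
static void spin_lock(void)   { preempt_disable(); }
static void spin_unlock(void) { preempt_enable(); }

static void schedule(void)    { preempt_disable(); do_switch(); preempt_enable(); }
static void __schedule(void)  { do_switch(); }	/* caller has preemption off */

int main(void)
{
	/* Old path: the unlock is a preemption point, then we schedule again. */
	spin_lock();
	spin_unlock();		/* may switch here ...                      */
	schedule();		/* ... and runs another pass right away     */
	printf("old path: %d scheduling passes\n", sched_passes);	/* 2 */

	/* New path: preemption stays disabled until the real schedule. */
	sched_passes = 0;
	need_resched = true;
	preempt_disable();
	spin_lock();
	spin_unlock();		/* preempt_count still > 0: no switch here  */
	__schedule();
	preempt_enable();
	printf("new path: %d scheduling passes\n", sched_passes);	/* 1 */
	return 0;
}

On the old path the unlock itself can consume the pending reschedule, so the
schedule() that follows is a second, back-to-back pass; on the new path the
elevated preempt count defers everything to the single __schedule() call, which
is what the hunks below implement.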

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/sched.h
kernel/mutex.c
kernel/sched.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8851d51a5380d8d37fa7ba3ceb529..9f0b372cfa6f8ac2c039cace94cb386a5a826ec1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -328,6 +328,7 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 
 struct nsproxy;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 357c6d221efe5a3988565abf1321ed7f155e20a5..524ffc33dc05cf503f984bc6736557f031186d62 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,6 +131,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        struct mutex_waiter waiter;
        unsigned long flags;
 
+       preempt_disable();
        spin_lock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_lock_common(lock, &waiter);
@@ -170,13 +171,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                        spin_unlock_mutex(&lock->wait_lock, flags);
 
                        debug_mutex_free_waiter(&waiter);
+                       preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);
 
                /* didnt get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
-               schedule();
+               __schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
 
@@ -193,6 +195,7 @@ done:
        spin_unlock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_free_waiter(&waiter);
+       preempt_enable();
 
        return 0;
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 8be2c13b50d018cc69280829e14e31612d4deddd..b001c133c359136f0d282a97cbe4989d8980472e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4538,15 +4538,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;
 
-need_resched:
-       preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_qsctr_inc(cpu);
@@ -4603,7 +4601,13 @@ need_resched_nonpreemptible:
 
        if (unlikely(reacquire_kernel_lock(current) < 0))
                goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+       preempt_disable();
+       __schedule();
        preempt_enable_no_resched();
        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                goto need_resched;
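
The resulting split, as the two kernel/sched.c hunks define it: schedule()
remains the entry point for ordinary callers and still does the
preempt_disable()/preempt_enable_no_resched() bracketing plus the
TIF_NEED_RESCHED retry loop, while __schedule() performs a single scheduling
pass and assumes its caller, such as __mutex_lock_common() above, has already
disabled preemption and will re-enable it itself.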