workqueue: don't save interrupts in run_workqueue()
author     Oleg Nesterov <oleg@tv-sign.ru>
           Wed, 9 May 2007 09:34:10 +0000 (02:34 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Wed, 9 May 2007 19:30:52 +0000 (12:30 -0700)
work->func() may sleep, so it is a bug to call run_workqueue() with irqs disabled.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
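
For context, a minimal sketch of the locking pattern the patch moves to. The helper name and the trimmed-down loop below are illustrative only, not the actual kernel code: the queue lock is taken with spin_lock_irq() because run_workqueue() is only ever entered with interrupts enabled, and it is dropped before each callback runs, since work->func() may sleep.

static void run_pending_work(struct cpu_workqueue_struct *cwq)
{
	/* Caller runs in process context with irqs on, so no need to save flags. */
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						      struct work_struct, entry);

		list_del_init(&work->entry);
		/* Re-enable irqs before the callback: work->func() may sleep. */
		spin_unlock_irq(&cwq->lock);

		work->func(work);

		spin_lock_irq(&cwq->lock);
	}
	spin_unlock_irq(&cwq->lock);
}

The might_sleep() annotations added to flush_workqueue() and flush_work() in the diff below document the same requirement on the flush paths' callers.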
kernel/workqueue.c

index ea422254f8bf3a9eb463250570605e27fe362220..74f3f7825229b679935ce08084a2e3800c4cd5ef 100644
@@ -227,13 +227,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
-       unsigned long flags;
-
-       /*
-        * Keep taking off work from the queue until
-        * done.
-        */
-       spin_lock_irqsave(&cwq->lock, flags);
+       spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
@@ -248,7 +242,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
-               spin_unlock_irqrestore(&cwq->lock, flags);
+               spin_unlock_irq(&cwq->lock);
 
                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
@@ -266,11 +260,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                        dump_stack();
                }
 
-               spin_lock_irqsave(&cwq->lock, flags);
+               spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
-       spin_unlock_irqrestore(&cwq->lock, flags);
+       spin_unlock_irq(&cwq->lock);
 }
 
 /*
@@ -399,6 +393,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
+       might_sleep();
+
        if (is_single_threaded(wq))
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        else {
@@ -445,6 +441,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
        struct cpu_workqueue_struct *cwq;
 
+       might_sleep();
+
        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)