 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 /*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
                                   int gfp_mask)
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
-       struct io_context *ioc = get_io_context(gfp_mask);
+       struct io_context *ioc = get_io_context(GFP_ATOMIC);
 
        if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
                goto out;
 
-       spin_lock_irq(q->queue_lock);
        if (rl->count[rw]+1 >= q->nr_requests) {
                /*
                 * The queue will fill after this allocation, so set it as
                 * full, and mark this process as "batching". This process
                 * will be allowed to complete a batch of requests, others
                 * will be blocked.
                 */
                /*
                 * The queue is full and the allocating process is not a
                 * "batcher", and not exempted by the IO scheduler
                 */
-               spin_unlock_irq(q->queue_lock);
                goto out;
        }
 
                if (unlikely(rl->count[rw] == 0))
                        rl->starved[rw] = 1;
 
-               spin_unlock_irq(q->queue_lock);
                goto out;
        }
 
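The net effect is an asymmetric locking contract: get_request() is now entered with q->queue_lock held (which is also why the io_context is allocated with GFP_ATOMIC, since the caller cannot sleep there), it returns NULL with the lock still held, and it returns a request with the lock already dropped. A minimal caller-side sketch of that contract, where do_alloc() is a hypothetical wrapper and not anything in the patch, looks roughly like this:

/* Sketch only: illustrates the new locking contract; do_alloc() is hypothetical. */
static struct request *do_alloc(request_queue_t *q, int rw, int gfp_mask)
{
        struct request *rq;

        spin_lock_irq(q->queue_lock);           /* contract: lock held on entry */
        rq = get_request(q, rw, NULL, gfp_mask);
        if (!rq) {
                /* failure: get_request() left queue_lock held */
                spin_unlock_irq(q->queue_lock);
                return NULL;
        }
        /* success: get_request() has already dropped queue_lock */
        return rq;
}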
 /*
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
  */
 static struct request *get_request_wait(request_queue_t *q, int rw,
                                        struct bio *bio)
                if (!rq) {
                        struct io_context *ioc;
 
-                       generic_unplug_device(q);
+                       __generic_unplug_device(q);
+                       spin_unlock_irq(q->queue_lock);
                        io_schedule();
 
                        /*
                        ioc = get_io_context(GFP_NOIO);
                        ioc_set_batching(q, ioc);
                        put_io_context(ioc);
+
+                       spin_lock_irq(q->queue_lock);
                }
                finish_wait(&rl->wait[rw], &wait);
        }
 
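The unplug change in get_request_wait() follows from the same rule: the function now decides to sleep while q->queue_lock is already held, and the plain generic_unplug_device(), as defined elsewhere in the block layer, takes that lock itself, roughly:

void generic_unplug_device(request_queue_t *q)
{
        spin_lock_irq(q->queue_lock);
        __generic_unplug_device(q);     /* variant that expects the lock to be held */
        spin_unlock_irq(q->queue_lock);
}

Calling it here with the lock already held would deadlock, so the lock-assuming __generic_unplug_device() is used instead; the lock is dropped only around io_schedule() and retaken before the allocation is retried, restoring the condition get_request() expects.
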
        BUG_ON(rw != READ && rw != WRITE);
 
-       if (gfp_mask & __GFP_WAIT)
+       spin_lock_irq(q->queue_lock);
+       if (gfp_mask & __GFP_WAIT) {
                rq = get_request_wait(q, rw, NULL);
-       else
+       } else {
                rq = get_request(q, rw, NULL, gfp_mask);
+               if (!rq)
+                       spin_unlock_irq(q->queue_lock);
+       }
+       /* q->queue_lock is unlocked at this point */
 
        return rq;
 }
-
 EXPORT_SYMBOL(blk_get_request);
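
None of this changes the interface seen by callers of blk_get_request(): it is still called without q->queue_lock and returns without it; the lock juggling above is purely internal. As an illustrative example (not part of the patch), a typical driver-side allocation still looks like:

        struct request *rq;

        /* Example only, not from the patch. */
        rq = blk_get_request(q, WRITE, GFP_NOIO);   /* __GFP_WAIT set: may sleep, never NULL */

        /* ... fill in and issue the request ... */

        blk_put_request(rq);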
 
 /**
 get_rq:
        /*
         * Grab a free request. This might sleep but can not fail.
+        * Returns with the queue unlocked.
         */
-       spin_unlock_irq(q->queue_lock);
        req = get_request_wait(q, rw, bio);
+
        /*
         * After dropping the lock and possibly sleeping here, our request
         * may now be mergeable after it had proven unmergeable (above).