pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[PATCH] block: implement elv_insert and use it (fix ordcolor flipping bug)
author    Tejun Heo <htejun@gmail.com>
          Wed, 8 Feb 2006 09:01:31 +0000 (01:01 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Wed, 8 Feb 2006 15:52:58 +0000 (07:52 -0800)
q->ordcolor must only be flipped on initial queueing of a hardbarrier
request.

Constructing the ordered sequence and requeueing used to pass through
__elv_add_request(), which flips q->ordcolor whenever it sees a barrier
request.

This patch separates out elv_insert() from __elv_add_request() and uses
elv_insert() when constructing ordered sequence and requeueing.
elv_insert() inserts the given request at the specified position and
does nothing else.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
block/elevator.c
block/ll_rw_blk.c
include/linux/elevator.h
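
To make the ordering bug concrete, here is a minimal userspace sketch of the
color bookkeeping (assumptions: toy_queue, toy_request and the helper names
below are hypothetical stand-ins for request_queue_t, struct request,
__elv_add_request() and elv_insert(); this is not the kernel code in the diff).
It models only the ordcolor handling: requeueing a hard-barrier request through
the old combined path re-colors the request and flips q->ordcolor a second
time, while requeueing through a bare insert leaves both untouched, which is
what the split below achieves.

/*
 * Minimal userspace sketch of the ordcolor bookkeeping described above.
 * The types and helpers here are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_request {
        bool hardbarrier;       /* models blk_barrier_rq(rq) */
        int  color;             /* models the REQ_ORDERED_COLOR flag */
};

struct toy_queue {
        int ordcolor;           /* models q->ordcolor */
};

/* Old combined path: color bookkeeping runs on *every* insertion. */
static void old_add_request(struct toy_queue *q, struct toy_request *rq)
{
        if (q->ordcolor)
                rq->color = 1;          /* request gets re-colored on requeue */
        if (rq->hardbarrier)
                q->ordcolor ^= 1;       /* flipped a second time: the bug */
        /* ... would insert rq into the queue here ... */
}

/* New elv_insert(): inserts at the requested position and nothing else. */
static void new_elv_insert(struct toy_queue *q, struct toy_request *rq)
{
        (void)q;
        (void)rq;
        /* ... would insert rq into the queue here ... */
}

/* New __elv_add_request(): bookkeeping for initial queueing only. */
static void new_add_request(struct toy_queue *q, struct toy_request *rq)
{
        if (q->ordcolor)
                rq->color = 1;
        if (rq->hardbarrier)
                q->ordcolor ^= 1;
        new_elv_insert(q, rq);
}

int main(void)
{
        struct toy_queue q = { .ordcolor = 0 };
        struct toy_request barrier = { .hardbarrier = true, .color = 0 };

        new_add_request(&q, &barrier);          /* initial queueing: one flip */
        printf("initial queueing:       color=%d ordcolor=%d\n",
               barrier.color, q.ordcolor);

        old_add_request(&q, &barrier);          /* requeue via old path */
        printf("requeue via old path:   color=%d ordcolor=%d (flipped again)\n",
               barrier.color, q.ordcolor);

        new_elv_insert(&q, &barrier);           /* requeue via elv_insert() */
        printf("requeue via elv_insert: color=%d ordcolor=%d (untouched)\n",
               barrier.color, q.ordcolor);
        return 0;
}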

diff --git a/block/elevator.c b/block/elevator.c
index 2fc269f69726d67e7a8ab7bcf6a58e2a10af009b..24b702d649a953977cafb7125ca1b81d73947cc3 100644
@@ -293,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
        rq->flags &= ~REQ_STARTED;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -310,41 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
        }
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-                      int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
        struct list_head *pos;
        unsigned ordseq;
 
-       if (q->ordcolor)
-               rq->flags |= REQ_ORDERED_COLOR;
-
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-               /*
-                * toggle ordered color
-                */
-               if (blk_barrier_rq(rq))
-                       q->ordcolor ^= 1;
-
-               /*
-                * barriers implicitly indicate back insertion
-                */
-               if (where == ELEVATOR_INSERT_SORT)
-                       where = ELEVATOR_INSERT_BACK;
-
-               /*
-                * this request is scheduling boundary, update end_sector
-                */
-               if (blk_fs_request(rq)) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = rq;
-               }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-               where = ELEVATOR_INSERT_BACK;
-
-       if (plug)
-               blk_plug_device(q);
-
        rq->q = q;
 
        switch (where) {
@@ -425,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
        }
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+                      int plug)
+{
+       if (q->ordcolor)
+               rq->flags |= REQ_ORDERED_COLOR;
+
+       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * toggle ordered color
+                */
+               if (blk_barrier_rq(rq))
+                       q->ordcolor ^= 1;
+
+               /*
+                * barriers implicitly indicate back insertion
+                */
+               if (where == ELEVATOR_INSERT_SORT)
+                       where = ELEVATOR_INSERT_BACK;
+
+               /*
+                * this request is scheduling boundary, update
+                * end_sector
+                */
+               if (blk_fs_request(rq)) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = rq;
+               }
+       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+               where = ELEVATOR_INSERT_BACK;
+
+       if (plug)
+               blk_plug_device(q);
+
+       elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
 {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ee5ed98db4cd15ff2ab7f5ae5b3a893466691abe..03d9c82b0fe7911fd6e922f746249795ccb80d46 100644
@@ -454,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -490,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 23fe746a1d514b8f912c7c6cfab8e53d5ba8e92d..18cf1f3e11845dd6ba28f81b3defd2e4ed52adff 100644
@@ -82,6 +82,7 @@ struct elevator_queue
 extern void elv_dispatch_sort(request_queue_t *, struct request *);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
+extern void elv_insert(request_queue_t *, struct request *, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
                               struct request *);