block: Skip I/O merges when disabled
author Alan D. Brunelle <Alan.Brunelle@hp.com>
Tue, 29 Apr 2008 12:44:19 +0000 (14:44 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
Tue, 29 Apr 2008 12:48:55 +0000 (14:48 +0200)
The block I/O + elevator + I/O scheduler code spends a lot of time trying
to merge I/Os -- rightfully so under "normal" circumstances. However, when
the incoming I/O stream is known to be /very/ random in nature, those
cycles are wasted.

This patch adds a per-request_queue tunable that (when set) disables
merge attempts (beyond the simple one-hit cache check), thus freeing up
a non-trivial number of CPU cycles.

Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
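
For reference, a minimal userspace sketch of toggling the new tunable. It assumes the attribute appears as /sys/block/<dev>/queue/nomerges alongside the other queue attributes registered in default_attrs[]; the device name "sda" and the file name toggle_nomerges.c are placeholders, and writing "1"/"0" maps onto the set_bit()/clear_bit() calls in the store handler shown in the diff below.

/* toggle_nomerges.c -- illustrative sketch only; "sda" is a placeholder device */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/sda/queue/nomerges";     /* assumed path */
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1\n", f);        /* "1" disables merge attempts, "0" re-enables them */
        fclose(f);

        f = fopen(path, "r");   /* read the value back via the show handler */
        if (f) {
                int val;
                if (fscanf(f, "%d", &val) == 1)
                        printf("nomerges is now %d\n", val);
                fclose(f);
        }
        return 0;
}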
block/blk-sysfs.c
block/elevator.c
include/linux/blkdev.h

block/blk-sysfs.c
index fc41d83be22bb02bab61f09b4b356dbba4697a05..e85c4013e8a29a5924c4657447a2b6611eb17702 100644 (file)
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+                                   size_t count)
+{
+       unsigned long nm;
+       ssize_t ret = queue_var_store(&nm, page, count);
+
+       if (nm)
+              set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+       else
+              clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+       return ret;
+}
+
 
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+       .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_nomerges_show,
+       .store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
        &queue_max_sectors_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
+       &queue_nomerges_entry.attr,
        NULL,
 };
 
block/elevator.c
index 7253fa05db0a202a020cc8d57759c038bc890d22..ac5310ef8270984e32799e0524917abf4246853a 100644 (file)
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
                }
        }
 
+       if (blk_queue_nomerges(q))
+               return ELEVATOR_NO_MERGE;
+
        /*
         * See if our hash lookup can find a potential backmerge.
         */
include/linux/blkdev.h
index 08df1ea8bac4f6b44f13de448f69940edcb52ee5..c09696a90d6a2149e22088a8ead1cecb9f5a082b 100644 (file)
@@ -408,6 +408,7 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED     7       /* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH   8       /* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI                9       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES    10      /* disable merge attempts */
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
                                           struct request_queue *q)
@@ -476,6 +477,7 @@ enum {
 #define blk_queue_plugged(q)   test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)  ((q)->ordseq)
 
 #define blk_fs_request(rq)     ((rq)->cmd_type == REQ_TYPE_FS)
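
QUEUE_FLAG_NOMERGES is a bit number rather than a mask: the sysfs store path flips it with set_bit()/clear_bit() on q->queue_flags, and blk_queue_nomerges() is simply a test_bit() wrapper. A small userspace sketch of that same bit-number convention, with ordinary bit operations standing in for the kernel's atomic bitops:

/* queue_flags_model.c -- illustrates the bit-number flag convention */
#include <stdio.h>

#define QUEUE_FLAG_PLUGGED      7
#define QUEUE_FLAG_NOMERGES     10      /* same bit number as in the patch */

/* non-atomic stand-ins for set_bit()/clear_bit()/test_bit() */
#define set_flag(nr, flags)     (*(flags) |= 1UL << (nr))
#define clear_flag(nr, flags)   (*(flags) &= ~(1UL << (nr)))
#define test_flag(nr, flags)    ((*(flags) >> (nr)) & 1UL)

int main(void)
{
        unsigned long queue_flags = 0;

        set_flag(QUEUE_FLAG_NOMERGES, &queue_flags);
        printf("nomerges: %lu\n", test_flag(QUEUE_FLAG_NOMERGES, &queue_flags));
        clear_flag(QUEUE_FLAG_NOMERGES, &queue_flags);
        printf("nomerges: %lu\n", test_flag(QUEUE_FLAG_NOMERGES, &queue_flags));
        return 0;
}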