From da20a20f3b5c175648fa797c899dd577e4dacb51 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 21 Jul 2006 20:30:28 +0200
Subject: [PATCH] [PATCH] ll_rw_blk: allow more flexibility for read_ahead_kb store

It can make sense to set read-ahead larger than a single request.
We should not be enforcing such policy on the user. Additionally,
using the BLKRASET ioctl doesn't impose such a restriction, so we
now expose identical behaviour through the two interfaces.

Issue also reported by Anton.

Signed-off-by: Jens Axboe
---
 block/ll_rw_blk.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 346be9ae31f..e3980ec747c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3806,9 +3806,6 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
 	spin_lock_irq(q->queue_lock);
-	if (ra_kb > (q->max_sectors >> 1))
-		ra_kb = (q->max_sectors >> 1);
-
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 	spin_unlock_irq(q->queue_lock);
 
-- 
2.41.1
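
For reference, here is a minimal userspace sketch (not part of the patch) of the two
interfaces the commit message compares: the /sys/block/<dev>/queue/read_ahead_kb
attribute handled by queue_ra_store() above, and the BLKRASET/BLKRAGET ioctls used by
tools such as blockdev. The sysfs store converts kilobytes to page-cache pages via
ra_kb >> (PAGE_CACHE_SHIFT - 10), i.e. a divide by 4 with 4 KB pages, and after this
patch it no longer clamps the value to max_sectors / 2, so both paths accept the same
range. The device path /dev/sda and the 1 MB value are illustrative assumptions, and
the program needs root.

/*
 * Sketch only: exercise the two read-ahead interfaces.
 * Assumes /dev/sda exists and that we run as root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKRAGET, BLKRASET */

int main(void)
{
	const char *dev = "/dev/sda";	/* assumed device */
	long ra_sectors;

	int fd = open(dev, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Path 1: the ioctl.  Read-ahead is expressed in 512-byte sectors. */
	if (ioctl(fd, BLKRAGET, &ra_sectors) == 0)
		printf("current read-ahead: %ld sectors (%ld KB)\n",
		       ra_sectors, ra_sectors / 2);

	/* 1 MB = 2048 sectors; this path never applied the max_sectors cap. */
	if (ioctl(fd, BLKRASET, 2048UL) != 0)
		perror("BLKRASET");
	close(fd);

	/*
	 * Path 2: the sysfs knob, in KB.  Before this patch queue_ra_store()
	 * silently clamped the value to max_sectors / 2; after it, this write
	 * behaves like BLKRASET above.
	 */
	FILE *f = fopen("/sys/block/sda/queue/read_ahead_kb", "w");
	if (f) {
		fprintf(f, "1024\n");
		fclose(f);
	}
	return 0;
}

The equivalent shell commands would be "blockdev --setra 2048 /dev/sda" and
"echo 1024 > /sys/block/sda/queue/read_ahead_kb".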