readahead: combine file_ra_state.prev_index/prev_offset into prev_pos
author Fengguang Wu <wfg@mail.ustc.edu.cn>
Tue, 16 Oct 2007 08:24:33 +0000 (01:24 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 16 Oct 2007 16:42:52 +0000 (09:42 -0700)
Combine the file_ra_state members
	unsigned long prev_index
	unsigned int prev_offset
into
	loff_t prev_pos

It is more consistent and better supports huge files.

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
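
The new field packs the last read position's page index and in-page byte
offset into a single 64-bit byte position. A minimal user-space sketch of
the round trip, assuming 4 KB pages; the constants and types below are
stand-ins for the kernel's definitions, not the real ones:

#include <stdio.h>

/* Illustrative stand-ins for kernel constants; 4 KB pages assumed. */
#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long index  = 0x123456; /* page index of the last read */
	unsigned int  offset = 0x789;    /* byte offset inside that page */

	/* Pack: widen to 64 bits before shifting, then fold in the offset. */
	long long prev_pos = ((long long)index << PAGE_CACHE_SHIFT) | offset;

	/* Unpack: both original values fall back out with a shift and a mask. */
	unsigned long prev_index  = prev_pos >> PAGE_CACHE_SHIFT;
	unsigned int  prev_offset = prev_pos & (PAGE_CACHE_SIZE - 1);

	printf("prev_pos=%#llx index=%#lx offset=%#x\n",
	       prev_pos, prev_index, prev_offset);
	return 0;
}

Because the low PAGE_CACHE_SHIFT bits hold the in-page offset exactly, both
old fields remain recoverable, which is what the mm/filemap.c hunk below does.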
fs/ext3/dir.c
fs/ext4/dir.c
fs/splice.c
include/linux/fs.h
mm/filemap.c
mm/readahead.c

diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index c00723a99f44f1aa6cb808a1e3ffd001505e67a2..c2c3491b18cf147df1e7341b89749ded292d36c0 100644
@@ -143,7 +143,7 @@ static int ext3_readdir(struct file * filp,
                                        sb->s_bdev->bd_inode->i_mapping,
                                        &filp->f_ra, filp,
                                        index, 1);
-                       filp->f_ra.prev_index = index;
+                       filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
                        bh = ext3_bread(NULL, inode, blk, 0, &err);
                }
 
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 3ab01c04e00c55fd07470a7de9a85d1cbf398815..e11890acfa21e5199a425eacb2e662c2402ab9bf 100644
@@ -142,7 +142,7 @@ static int ext4_readdir(struct file * filp,
                                        sb->s_bdev->bd_inode->i_mapping,
                                        &filp->f_ra, filp,
                                        index, 1);
-                       filp->f_ra.prev_index = index;
+                       filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
                        bh = ext4_bread(NULL, inode, blk, 0, &err);
                }
 
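Both readdir hunks cast the page index to loff_t before shifting, and the
same pattern recurs in the fs/splice.c and mm/filemap.c hunks below; this is
the shift-overflow fix credited to akpm in the changelog. A small sketch of
what goes wrong without the cast on a 32-bit build, using made-up values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12

int main(void)
{
	/* A page index whose byte position exceeds 32 bits. */
	uint32_t index = 0x00300000;

	/* Without the cast the shift is done in 32 bits: high bits vanish. */
	long long bad  = (long long)(index << PAGE_CACHE_SHIFT);

	/* With the cast the shift is done in 64 bits, as in the patch. */
	long long good = (long long)index << PAGE_CACHE_SHIFT;

	printf("bad=%#llx good=%#llx\n", bad, good); /* bad=0, good=0x300000000 */
	return 0;
}
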
diff --git a/fs/splice.c b/fs/splice.c
index e95a36228863affa3d6c19a37ac4766f8e147412..2df6be43c667c0cc530f934325fd15afea2aed99 100644
@@ -447,7 +447,7 @@ fill_it:
         */
        while (page_nr < nr_pages)
                page_cache_release(pages[page_nr++]);
-       in->f_ra.prev_index = index;
+       in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
        if (spd.nr_pages)
                return splice_to_pipe(pipe, &spd);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8250811081ff09e7f0a3dd55d5d019402aeb5ffd..500ffc0e4ac7856f4cd9487794f2cdd9ed35195e 100644
@@ -704,8 +704,7 @@ struct file_ra_state {
 
        unsigned int ra_pages;          /* Maximum readahead window */
        int mmap_miss;                  /* Cache miss stat for mmap accesses */
-       unsigned long prev_index;       /* Cache last read() position */
-       unsigned int prev_offset;       /* Offset where last read() ended in a page */
+       loff_t prev_pos;                /* Cache last read() position */
 };
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 5dc18d76e7032f390aa2b1686f3ee1d9664d1e2c..bbcca456d8a653399b4adab4184c6ff84e2ed1f4 100644
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
        cached_page = NULL;
        index = *ppos >> PAGE_CACHE_SHIFT;
        next_index = index;
-       prev_index = ra.prev_index;
-       prev_offset = ra.prev_offset;
+       prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+       prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
        last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -966,7 +966,6 @@ page_ok:
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
                prev_offset = offset;
-               ra.prev_offset = offset;
 
                page_cache_release(page);
                if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ no_cached_page:
 
 out:
        *_ra = ra;
-       _ra->prev_index = prev_index;
+       _ra->prev_pos = prev_index;
+       _ra->prev_pos <<= PAGE_CACHE_SHIFT;
+       _ra->prev_pos |= prev_offset;
 
-       *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+       *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
        if (cached_page)
                page_cache_release(cached_page);
        if (filp)
@@ -1396,7 +1397,7 @@ retry_find:
         * Found the page and have a reference on it.
         */
        mark_page_accessed(page);
-       ra->prev_index = page->index;
+       ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
 
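The out: sequence above rebuilds prev_pos in three statements so that the
shift happens at 64-bit width: prev_index lands in the loff_t field first,
and only then is it shifted and or'ed with prev_offset. A hypothetical
one-expression equivalent with an explicit widening cast, using stand-in
types rather than the kernel's:

/* Sketch only: single-expression form of the out: sequence, with
 * long long standing in for loff_t and unsigned long for pgoff_t. */
static long long pack_prev_pos(unsigned long prev_index,
			       unsigned int prev_offset)
{
	return ((long long)prev_index << 12 /* PAGE_CACHE_SHIFT */)
		| prev_offset;
}
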
diff --git a/mm/readahead.c b/mm/readahead.c
index d2504877b26929b00db9e2bd4e2afc297a61ecce..4a58befbde4a5f157f6c5372d0420ea9c5390e71 100644
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
-       ra->prev_index = -1;
+       ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
 {
-       int max;        /* max readahead pages */
-       int sequential;
-
-       max = ra->ra_pages;
-       sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+       int     max = ra->ra_pages;     /* max readahead pages */
+       pgoff_t prev_offset;
+       int     sequential;
 
        /*
         * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
                goto readit;
        }
 
+       prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+       sequential = offset - prev_offset <= 1UL || req_size > max;
+
        /*
         * Standalone, small read.
         * Read as is, and do not pollute the readahead state.
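
With the ondemand_readahead() change, the previous read's page index is now
derived from prev_pos, and only on the slow path after the expected-offset
check. A standalone sketch of the sequentiality test with made-up numbers;
the variable names mirror the patch, but nothing here is kernel API:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12

int main(void)
{
	/* Previous read ended partway into page 99. */
	long long prev_pos = ((long long)99 << PAGE_CACHE_SHIFT) | 0x800;
	unsigned long offset   = 100; /* first page of the new read */
	unsigned long req_size = 4;   /* pages requested */
	unsigned long max      = 32;  /* readahead window in pages */

	unsigned long prev_offset = prev_pos >> PAGE_CACHE_SHIFT;

	/* Sequential if the new read starts on the previous page or the
	 * one right after it, or if the request dwarfs the window. */
	int sequential = (offset - prev_offset <= 1UL) || (req_size > max);

	printf("sequential=%d\n", sequential); /* prints 1: 100 - 99 == 1 */
	return 0;
}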