[PATCH] writeback: fix range handling
author:    OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
           Fri, 23 Jun 2006 09:03:26 +0000 (02:03 -0700)
committer: Linus Torvalds <torvalds@g5.osdl.org>
           Fri, 23 Jun 2006 14:42:49 +0000 (07:42 -0700)
When a writeback_control's `start' and `end' fields are used to indicate a
one-byte range starting at file offset zero, the required values of
.start=0, .end=0 give the ->writepages() implementation no way of telling
that it is being asked to perform a range request, because
(start == 0 && end == 0) is currently overloaded to mean "this is not a
write-a-range request".
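
For illustration only, a minimal sketch of that ambiguity using the old
field names (the .sync_mode value here is just an example):

        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .start     = 0,
                .end       = 0,  /* one byte at offset 0, or "no range"? */
        };
        /* ->writepages() cannot tell these two meanings apart. */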

To make this sane, the patch changes how the writeback range is expressed
in writeback_control.

On the caller's side: whenever ->writepages() is called, the range is
always set, either as range_start/range_end or as range_cyclic.

On the ->writepages() side: if range_cyclic is set, the range is treated as
cyclic (resuming from the previous offset); otherwise range_start and
range_end are used directly.
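
Inside a ->writepages() implementation this ends up looking roughly like
the pattern used in the cifs and mpage hunks below:

        pgoff_t index, end;

        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* resume from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
        }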

This patch does the following:

    - Add LLONG_MAX, LLONG_MIN and ULLONG_MAX to include/linux/kernel.h.
      -1 is usually fine for range_end (its type is long long), but code
      such as

          range_end += val;            /* range_end becomes "val - 1" */
          u64val = range_end >> bits;  /* u64val becomes ~0ULL */

      would go wrong.  So add LLONG_MAX to avoid such nasty surprises, and
      use LLONG_MAX for range_end.

    - Make all callers of ->writepages() set range_start/range_end or
      range_cyclic (typical caller setups are sketched after this list).

    - Fix the updates of ->writeback_index, which were already a bit
      strange: if a sweep starts at index 0 and is terminated by the
      nr_to_write check, recording that last index reduces the chance of
      ever scanning the end of the file.  So update ->writeback_index only
      if range_cyclic is true or the whole file was scanned.
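
A sketch of the two caller-side setups, with field values taken from the
write_inode_now() and wb_kupdate() hunks below:

        /* Whole-file, data-integrity writeback (write_inode_now()): */
        struct writeback_control wbc = {
                .nr_to_write  = LONG_MAX,
                .sync_mode    = WB_SYNC_ALL,
                .range_start  = 0,
                .range_end    = LLONG_MAX,
        };

        /* Background, cyclic writeback (wb_kupdate()): */
        struct writeback_control wbc_kupdate = {
                .nr_to_write  = 0,
                .nonblocking  = 1,
                .for_kupdate  = 1,
                .range_cyclic = 1,
        };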

Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Steven French <sfrench@us.ibm.com>
Cc: "Vladimir V. Saveliev" <vs@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
fs/cifs/file.c
fs/fs-writeback.c
fs/mpage.c
fs/sync.c
include/linux/kernel.h
include/linux/writeback.h
mm/filemap.c
mm/page-writeback.c
mm/vmscan.c

index e2b4ce1dad6680cc54d8af516e7cf876533361fd..487ea8b3baaa4f512db3b802b7329e79a7f94d00 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1079,9 +1079,9 @@ static int cifs_writepages(struct address_space *mapping,
        unsigned int bytes_written;
        struct cifs_sb_info *cifs_sb;
        int done = 0;
-       pgoff_t end = -1;
+       pgoff_t end;
        pgoff_t index;
-       int is_range = 0;
+       int range_whole = 0;
        struct kvec iov[32];
        int len;
        int n_iov = 0;
@@ -1122,16 +1122,14 @@ static int cifs_writepages(struct address_space *mapping,
        xid = GetXid();
 
        pagevec_init(&pvec, 0);
-       if (wbc->sync_mode == WB_SYNC_NONE)
+       if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
-       else {
-               index = 0;
-               scanned = 1;
-       }
-       if (wbc->start || wbc->end) {
-               index = wbc->start >> PAGE_CACHE_SHIFT;
-               end = wbc->end >> PAGE_CACHE_SHIFT;
-               is_range = 1;
+               end = -1;
+       } else {
+               index = wbc->range_start >> PAGE_CACHE_SHIFT;
+               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                       range_whole = 1;
                scanned = 1;
        }
 retry:
@@ -1167,7 +1165,7 @@ retry:
                                break;
                        }
 
-                       if (unlikely(is_range) && (page->index > end)) {
+                       if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                break;
@@ -1271,7 +1269,7 @@ retry:
                index = 0;
                goto retry;
        }
-       if (!is_range)
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
 
        FreeXid(xid);
index f3fbe2d030f4def8fae388ef49bc617bf251e913..6db95cf3aaa22dfd8abdfaae7a92313cf10520df 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -461,6 +461,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
 {
        struct writeback_control wbc = {
                .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+               .range_start    = 0,
+               .range_end      = LLONG_MAX,
        };
        unsigned long nr_dirty = read_page_state(nr_dirty);
        unsigned long nr_unstable = read_page_state(nr_unstable);
@@ -559,6 +561,8 @@ int write_inode_now(struct inode *inode, int sync)
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = WB_SYNC_ALL,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
        };
 
        if (!mapping_cap_writeback_dirty(inode->i_mapping))
index 9bf2eb30e6f4e575c4e31874fad1bf2c48c66519..1e4598247d0b962f02eca7f390bf6683c9df3e4e 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
-       pgoff_t end = -1;               /* Inclusive */
+       pgoff_t end;            /* Inclusive */
        int scanned = 0;
-       int is_range = 0;
+       int range_whole = 0;
 
        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
                writepage = mapping->a_ops->writepage;
 
        pagevec_init(&pvec, 0);
-       if (wbc->sync_mode == WB_SYNC_NONE) {
+       if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
+               end = -1;
        } else {
-               index = 0;                        /* whole-file sweep */
-               scanned = 1;
-       }
-       if (wbc->start || wbc->end) {
-               index = wbc->start >> PAGE_CACHE_SHIFT;
-               end = wbc->end >> PAGE_CACHE_SHIFT;
-               is_range = 1;
+               index = wbc->range_start >> PAGE_CACHE_SHIFT;
+               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                       range_whole = 1;
                scanned = 1;
        }
 retry:
@@ -759,7 +757,7 @@ retry:
                                continue;
                        }
 
-                       if (unlikely(is_range) && page->index > end) {
+                       if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                continue;
@@ -810,7 +808,7 @@ retry:
                index = 0;
                goto retry;
        }
-       if (!is_range)
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        if (bio)
                mpage_bio_submit(WRITE, bio);
index aab5ffe77e9fd67e3ac9173a4cb9488fd393f074..955aef04da289fecb80f51981ae98f538e6bcdc2 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -100,7 +100,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
        }
 
        if (nbytes == 0)
-               endbyte = -1;
+               endbyte = LLONG_MAX;
        else
                endbyte--;              /* inclusive */
 
index f4fc576ed4c40c6d28dd940a869801a138a0a5e6..25fccd859fbf311d1026aa6ff3a1e5c171cd85fd 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,6 +24,9 @@ extern const char linux_banner[];
 #define LONG_MAX       ((long)(~0UL>>1))
 #define LONG_MIN       (-LONG_MAX - 1)
 #define ULONG_MAX      (~0UL)
+#define LLONG_MAX      ((long long)(~0ULL>>1))
+#define LLONG_MIN      (-LLONG_MAX - 1)
+#define ULLONG_MAX     (~0ULL)
 
 #define STACK_MAGIC    0xdeadbeef
 
index 56f92fcbe94aa9a82508b5db1a878c43cd348699..9e38b566d0e77325d50fb9bfd52b28898c753349 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -50,14 +50,15 @@ struct writeback_control {
         * a hint that the filesystem need only write out the pages inside that
         * byterange.  The byte at `end' is included in the writeout request.
         */
-       loff_t start;
-       loff_t end;
+       loff_t range_start;
+       loff_t range_end;
 
        unsigned nonblocking:1;         /* Don't get stuck on request queues */
        unsigned encountered_congestion:1; /* An output: a queue is full */
        unsigned for_kupdate:1;         /* A kupdate writeback */
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned for_writepages:1;      /* This is a writepages() call */
+       unsigned range_cyclic:1;        /* range_start is cyclic */
 };
 
 /*
index fd57442186cbec1b55654639415af850ba352a2f..3342067ca436e33a8399664a35c7265986272427 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -190,8 +190,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = mapping->nrpages * 2,
-               .start = start,
-               .end = end,
+               .range_start = start,
+               .range_end = end,
        };
 
        if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +204,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
 {
-       return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+       return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 }
 
 int filemap_fdatawrite(struct address_space *mapping)
index 75d7f48b79bba537d522cbd709138d48196d6782..8ccf6f1b1473c1e26e0da73e5f8f95c4b041def9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,6 +204,7 @@ static void balance_dirty_pages(struct address_space *mapping)
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
+                       .range_cyclic   = 1,
                };
 
                get_dirty_limits(&wbs, &background_thresh,
@@ -331,6 +332,7 @@ static void background_writeout(unsigned long _min_pages)
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
+               .range_cyclic   = 1,
        };
 
        for ( ; ; ) {
@@ -407,6 +409,7 @@ static void wb_kupdate(unsigned long arg)
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
+               .range_cyclic   = 1,
        };
 
        sync_supers();
index 46be8a02280e70f1526e518a820340977a3afc69..bc5d4f43036cc100d566b0628d89b8433104224f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -339,6 +339,8 @@ pageout_t pageout(struct page *page, struct address_space *mapping)
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
+                       .range_start = 0,
+                       .range_end = LLONG_MAX,
                        .nonblocking = 1,
                        .for_reclaim = 1,
                };