int                     i;
 
        BUG_ON(PageWriteback(page));
-       set_page_writeback(page);
+       if (bh_count)
+               set_page_writeback(page);
        if (clear_dirty)
                clear_page_dirty(page);
        unlock_page(page);
 
                if (probed_page && clear_dirty)
                        wbc->nr_to_write--;     /* Wrote an "extra" page */
-       } else {
-               end_page_writeback(page);
-               wbc->pages_skipped++;   /* We didn't write this page */
        }
 }
 
 {
        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *mp = iomapp, *tmp;
-       unsigned long           end, offset;
-       pgoff_t                 end_index;
-       int                     i = 0, index = 0;
+       unsigned long           offset, end_offset;
+       int                     index = 0;
        int                     bbits = inode->i_blkbits;
+       int                     len, page_dirty;
 
-       end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-       if (page->index < end_index) {
-               end = PAGE_CACHE_SIZE;
-       } else {
-               end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
-       }
+       end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+
+       /*
+        * page_dirty is initially a count of buffers on the page before
+        * EOF and is decremented as we move each into a cleanable state.
+        */
+       len = 1 << inode->i_blkbits;
+       end_offset = max(end_offset, PAGE_CACHE_SIZE);
+       end_offset = roundup(end_offset, len);
+       page_dirty = end_offset / len;
+
+       offset = 0;
        bh = head = page_buffers(page);
        do {
-               offset = i << bbits;
-               if (offset >= end)
+               if (offset >= end_offset)
                        break;
                if (!(PageUptodate(page) || buffer_uptodate(bh)))
                        continue;
                        if (startio) {
                                lock_buffer(bh);
                                bh_arr[index++] = bh;
+                               page_dirty--;
                        }
                        continue;
                }
                        unlock_buffer(bh);
                        mark_buffer_dirty(bh);
                }
-       } while (i++, (bh = bh->b_this_page) != head);
+               page_dirty--;
+       } while (offset += len, (bh = bh->b_this_page) != head);
 
-       if (startio) {
-               xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
+       if (startio && index) {
+               xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
        } else {
                unlock_page(page);
        }
                }
        }
 
-       offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        end_offset = min_t(unsigned long long,
-                       offset + PAGE_CACHE_SIZE, i_size_read(inode));
-
-       bh = head = page_buffers(page);
-       iomp = NULL;
+                       (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+       offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 
        /*
-        * page_dirty is initially a count of buffers on the page and
-        * is decrememted as we move each into a cleanable state.
+        * page_dirty is initially a count of buffers on the page before
+        * EOF and is decremented as we move each into a cleanable state.
         */
-       len = bh->b_size;
-       page_dirty = PAGE_CACHE_SIZE / len;
+       len = 1 << inode->i_blkbits;
+       p_offset = max(p_offset, PAGE_CACHE_SIZE);
+       p_offset = roundup(p_offset, len);
+       page_dirty = p_offset / len;
+
+       iomp = NULL;
+       p_offset = 0;
+       bh = head = page_buffers(page);
 
        do {
                if (offset >= end_offset)
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
-       if (startio)
-               xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);
+       if (startio) {
+               WARN_ON(page_dirty);
+               xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
+       }
 
        if (iomp) {
                offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
 
                        break;
                }
 
-               error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
+               error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count,
+                                                &imap, &nimaps);
                break;
        case BMAPI_UNWRITTEN:
                lockmode = 0;
 int
 xfs_iomap_write_allocate(
        xfs_inode_t     *ip,
+       loff_t          offset,
+       size_t          count,
        xfs_bmbt_irec_t *map,
        int             *retmap)
 {
        if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
                return XFS_ERROR(error);
 
-       offset_fsb = map->br_startoff;
+       offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = map->br_blockcount;
-       map_start_fsb = offset_fsb;
+       map_start_fsb = map->br_startoff;
 
        XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
 
                                        imap[i].br_startoff,
                                        imap[i].br_blockcount,imap[i].br_state);
                         }
-                       if ((map->br_startoff >= imap[i].br_startoff) &&
-                           (map->br_startoff < (imap[i].br_startoff +
-                                                imap[i].br_blockcount))) {
+                       if ((offset_fsb >= imap[i].br_startoff) &&
+                           (offset_fsb < (imap[i].br_startoff +
+                                          imap[i].br_blockcount))) {
                                *map = imap[i];
                                *retmap = 1;
                                XFS_STATS_INC(xs_xstrat_quick);
                 * file, just surrounding data, try again.
                 */
                nimaps--;
-               offset_fsb = imap[nimaps].br_startoff +
-                            imap[nimaps].br_blockcount;
-               map_start_fsb = offset_fsb;
+               map_start_fsb = imap[nimaps].br_startoff +
+                               imap[nimaps].br_blockcount;
        }
 
 trans_cancel:
 
  *
  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
  */
-
-
-
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
        BMAPI_UNWRITTEN  = (1 << 3),    /* unwritten extents to real extents */
        /* modifiers */
        BMAPI_IGNSTATE = (1 << 4),      /* ignore unwritten state on read */
-       BMAPI_DIRECT = (1 << 5),                /* direct instead of buffered write */
+       BMAPI_DIRECT = (1 << 5),        /* direct instead of buffered write */
        BMAPI_MMAP = (1 << 6),          /* allocate for mmap write */
        BMAPI_SYNC = (1 << 7),          /* sync write to flush delalloc space */
        BMAPI_TRYLOCK = (1 << 8),       /* non-blocking request */
                                  int, struct xfs_bmbt_irec *, int *, int);
 extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, int,
                                 struct xfs_bmbt_irec *, int *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *,
+extern int xfs_iomap_write_allocate(struct xfs_inode *, loff_t, size_t,
                                struct xfs_bmbt_irec *, int *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t);
 
 
                                void *, loff_t, size_t, int,
                                struct xfs_bmbt_irec *, int *);
 typedef int            (*xfs_iomap_write_allocate_t)(
-                               void *, struct xfs_bmbt_irec *, int *);
+                               void *, loff_t, size_t,
+                               struct xfs_bmbt_irec *, int *);
 typedef int            (*xfs_iomap_write_unwritten_t)(
                                void *, loff_t, size_t);
 typedef uint           (*xfs_lck_map_shared_t)(void *);
 #define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
        (*(mp)->m_io_ops.xfs_iomap_write_delay) \
                ((io)->io_obj, offset, count, flags, mval, nmap)
-#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
+#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, mval, nmap) \
        (*(mp)->m_io_ops.xfs_iomap_write_allocate) \
-               ((io)->io_obj, mval, nmap)
+               ((io)->io_obj, offset, count, mval, nmap)
 #define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
        (*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
                ((io)->io_obj, offset, count)