mm: page_mkwrite change prototype to match fault
author    Nick Piggin <npiggin@suse.de>
          Tue, 31 Mar 2009 22:23:21 +0000 (15:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 1 Apr 2009 15:59:14 +0000 (08:59 -0700)
Change the page_mkwrite prototype to take a struct vm_fault, and return
VM_FAULT_xxx flags.  There should be no functional change.

This makes it possible to return much more detailed error information to
the VM (and can also provide more information, e.g. the virtual_address, to
the driver, which might be important in some special cases).

This is required for a subsequent fix, and it will also make it easier to
merge page_mkwrite() with fault() in the future.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <joel.becker@oracle.com>
Cc: Artem Bityutskiy <dedekind@infradead.org>
Cc: Felix Blyakher <felixb@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
16 files changed:
Documentation/filesystems/Locking
drivers/video/fb_defio.c
fs/btrfs/ctree.h
fs/btrfs/inode.c
fs/buffer.c
fs/ext4/ext4.h
fs/ext4/inode.c
fs/fuse/file.c
fs/gfs2/ops_file.c
fs/nfs/file.c
fs/ocfs2/mmap.c
fs/ubifs/file.c
fs/xfs/linux-2.6/xfs_file.c
include/linux/buffer_head.h
include/linux/mm.h
mm/memory.c
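
For orientation before the per-file hunks, here is a minimal sketch of what the
conversion looks like on the filesystem side. It assumes only the new prototype
and return convention introduced by this patch; example_page_mkwrite() and
example_prepare_write() are hypothetical names used for illustration, not part
of the patch:

/*
 * Hypothetical page_mkwrite handler under the new prototype: the page now
 * arrives via vmf->page, and failures are reported as VM_FAULT_xxx codes
 * instead of negative errnos.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* hypothetical helper: reserve space / allocate blocks behind the page */
static int example_prepare_write(struct inode *inode, struct page *page)
{
	return 0;
}

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int err;

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		/*
		 * The page was truncated while we waited for the lock;
		 * VM_FAULT_NOPAGE makes the VM retry rather than error out.
		 */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	err = example_prepare_write(inode, page);
	unlock_page(page);

	/* errno-style failures are folded into a VM_FAULT_xxx code */
	return err ? VM_FAULT_SIGBUS : 0;
}

static struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};

The hunks below all follow this pattern: pick the page out of vmf->page at the
top of the handler and map any error onto VM_FAULT_SIGBUS before returning.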

diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4e78ce6778435aacf321de21baffe4095739f2e9..76efe5b71d7d8f744739a129f4b5333b898acf5c 100644
@@ -505,7 +505,7 @@ prototypes:
        void (*open)(struct vm_area_struct*);
        void (*close)(struct vm_area_struct*);
        int (*fault)(struct vm_area_struct*, struct vm_fault *);
-       int (*page_mkwrite)(struct vm_area_struct *, struct page *);
+       int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
 
 locking rules:
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 082026546aee979e9bee1f2e8f4a440df02d20a6..0a7a6679ee6eb05e2be754ff816ba3547a0f7fdb 100644
@@ -85,8 +85,9 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
 /* vm_ops->page_mkwrite handler */
 static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
-                                 struct page *page)
+                                 struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        struct fb_info *info = vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *cur;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5e1d4e30e9d863a6c66c36549136bb9abd088738..7dd1b6d0bf323cf7e006fd309e76fc61bccfa58f 100644
@@ -2060,7 +2060,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 unsigned long btrfs_force_ra(struct address_space *mapping,
                              struct file_ra_state *ra, struct file *file,
                              pgoff_t offset, pgoff_t last_index);
-int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_delete_inode(struct inode *inode);
 void btrfs_put_inode(struct inode *inode);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7d4f948bc22a5b5c811b93b385ff36d5b49e8977..ec5423790bbbec9a39bdc4528a069d3b54738456 100644
@@ -4292,8 +4292,9 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
  */
-int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        struct inode *inode = fdentry(vma->vm_file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -4362,6 +4363,8 @@ again:
 out_unlock:
        unlock_page(page);
 out:
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
        return ret;
 }
 
diff --git a/fs/buffer.c b/fs/buffer.c
index 73abe6d8218c0629152bab23344e5b1bb3edeba3..6d51a3da362c9f110882374ceb22733113d24255 100644
@@ -2313,9 +2313,10 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
  * unlock the page.
  */
 int
-block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                   get_block_t get_block)
 {
+       struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        unsigned long end;
        loff_t size;
@@ -2340,6 +2341,9 @@ block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
                ret = block_commit_write(page, 0, end);
 
 out_unlock:
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
+
        unlock_page(page);
        return ret;
 }
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6083bb38057b8bca9fd18f83a871435d787f2468..990c9400092414ed6cedeffe724afe103d116efc 100644
@@ -1098,7 +1098,7 @@ extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
 extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
-extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t ext4_get_reserved_space(struct inode *inode);
 
 /* ioctl.c */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 71d3ecd5db798ce6bcab06331ca7ec4819f62ea1..dd82ff390067482ac31fac5f5a9e3414d45cbcdf 100644
@@ -5146,8 +5146,9 @@ static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
        return !buffer_mapped(bh);
 }
 
-int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        loff_t size;
        unsigned long len;
        int ret = -EINVAL;
@@ -5199,6 +5200,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
                goto out_unlock;
        ret = 0;
 out_unlock:
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
        up_read(&inode->i_alloc_sem);
        return ret;
 }
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 821d10f719bd06f6c49f855ace9a5d9a3abe3b41..4e340fedf768e92a1f3c60e8e2cd8621a7ecb87f 100644
@@ -1234,8 +1234,9 @@ static void fuse_vma_close(struct vm_area_struct *vma)
  * - sync(2)
  * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
  */
-static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        /*
         * Don't use page->mapping as it may become NULL from a
         * concurrent truncate.
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 3b9e8de3500be1c40b2fbb798cb8dcdc8d4b51b5..70b9b8548945d92450afc5936eddf72744ecccd7 100644
@@ -337,8 +337,9 @@ static int gfs2_allocate_page_backing(struct page *page)
  * blocks allocated on disk to back that page.
  */
 
-static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -412,6 +413,8 @@ out_unlock:
        gfs2_glock_dq(&gh);
 out:
        gfs2_holder_uninit(&gh);
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
        return ret;
 }
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 90f292b520d25eec248a9728b7d61cf20cd061f9..cec79392e4ba3f1b746166928b073379e2622ab4 100644
@@ -451,8 +451,9 @@ const struct address_space_operations nfs_file_aops = {
        .launder_page = nfs_launder_page,
 };
 
-static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        struct file *filp = vma->vm_file;
        struct dentry *dentry = filp->f_path.dentry;
        unsigned pagelen;
@@ -483,6 +484,8 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
                ret = pagelen;
 out_unlock:
        unlock_page(page);
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
        return ret;
 }
 
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index eea1d24713ea13d6f4500eb059613a75d98ae933..b606496b72ec55c65554c6e72a9353e7739a4e9f 100644
@@ -154,8 +154,9 @@ out:
        return ret;
 }
 
-static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct buffer_head *di_bh = NULL;
        sigset_t blocked, oldset;
@@ -196,7 +197,8 @@ out:
        ret2 = ocfs2_vm_op_unblock_sigs(&oldset);
        if (ret2 < 0)
                mlog_errno(ret2);
-
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
        return ret;
 }
 
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 93b6de51f261727f2c5f4e62814a0e79d3b109b5..0ff89fe71e5103c3effe3dfd27dc2bac58b8bfc5 100644
@@ -1434,8 +1434,9 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
  * mmap()d file has taken write protection fault and is being made
  * writable. UBIFS must ensure page is budgeted for.
  */
-static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec now = ubifs_current_time(inode);
@@ -1447,7 +1448,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
        ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));
 
        if (unlikely(c->ro_media))
-               return -EROFS;
+               return VM_FAULT_SIGBUS; /* -EROFS */
 
        /*
         * We have not locked @page so far so we may budget for changing the
@@ -1480,7 +1481,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
                if (err == -ENOSPC)
                        ubifs_warn("out of space for mmapped file "
                                   "(inode number %lu)", inode->i_ino);
-               return err;
+               return VM_FAULT_SIGBUS;
        }
 
        lock_page(page);
@@ -1520,6 +1521,8 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
 out_unlock:
        unlock_page(page);
        ubifs_release_budget(c, &req);
+       if (err)
+               err = VM_FAULT_SIGBUS;
        return err;
 }
 
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index e14c4e3aea0c331f67ba529946e201c6d814e8ce..f4e25544157422812b4a1e576d3539e86b75b9bd 100644
@@ -234,9 +234,9 @@ xfs_file_mmap(
 STATIC int
 xfs_vm_page_mkwrite(
        struct vm_area_struct   *vma,
-       struct page             *page)
+       struct vm_fault         *vmf)
 {
-       return block_page_mkwrite(vma, page, xfs_get_blocks);
+       return block_page_mkwrite(vma, vmf, xfs_get_blocks);
 }
 
 const struct file_operations xfs_file_operations = {
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f19fd9045ea0a621e05bc6eb11d515a78656f8bb..3d7bcde2e3325dacb1e5ec32ee3f1a50ad4e1725 100644
@@ -216,7 +216,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
                        get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
-int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
 void block_sync_page(struct page *);
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2223f8dfa568c2897dabab9b6fa07bc0aead39ea..aeabe953ba4f6d85d4492ab6915d4d6d589cc71c 100644
@@ -135,6 +135,7 @@ extern pgprot_t protection_map[16];
 
 #define FAULT_FLAG_WRITE       0x01    /* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR   0x02    /* Fault was via a nonlinear mapping */
+#define FAULT_FLAG_MKWRITE     0x04    /* Fault was mkwrite of existing pte */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -187,7 +188,7 @@ struct vm_operations_struct {
 
        /* notification that a previously read-only page is about to become
         * writable, if an error is returned it will cause a SIGBUS */
-       int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+       int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
        /* called by access_process_vm when get_user_pages() fails, typically
         * for use by special VMAs that can switch between memory and hardware
diff --git a/mm/memory.c b/mm/memory.c
index 5b4ad5e4f98dfd7b3bf351dc8df27a4ccf76d2cf..cf6873e91c6a7eeb166031a7c8ae9fd4f3dda100 100644
@@ -1945,6 +1945,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 * get_user_pages(.write=1, .force=1).
                 */
                if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+                       struct vm_fault vmf;
+                       int tmp;
+
+                       vmf.virtual_address = (void __user *)(address &
+                                                               PAGE_MASK);
+                       vmf.pgoff = old_page->index;
+                       vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+                       vmf.page = old_page;
+
                        /*
                         * Notify the address space that the page is about to
                         * become writable so that it can prohibit this or wait
@@ -1956,8 +1965,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_get(old_page);
                        pte_unmap_unlock(page_table, ptl);
 
-                       if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+                       tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                       if (unlikely(tmp &
+                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                               ret = tmp;
                                goto unwritable_page;
+                       }
 
                        /*
                         * Since we dropped the lock we need to revalidate
@@ -2106,7 +2119,7 @@ oom:
 
 unwritable_page:
        page_cache_release(old_page);
-       return VM_FAULT_SIGBUS;
+       return ret;
 }
 
 /*
@@ -2648,9 +2661,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         * to become writable
                         */
                        if (vma->vm_ops->page_mkwrite) {
+                               int tmp;
+
                                unlock_page(page);
-                               if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
-                                       ret = VM_FAULT_SIGBUS;
+                               vmf.flags |= FAULT_FLAG_MKWRITE;
+                               tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                               if (unlikely(tmp &
+                                         (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                                       ret = tmp;
                                        anon = 1; /* no anon but release vmf.page */
                                        goto out_unlocked;
                                }
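
On the driver side, the practical gain is the extra context that struct
vm_fault carries compared to the old bare struct page argument. A hedged
sketch in the spirit of the fb_defio hunk above; struct example_info,
example_mkwrite() and its bookkeeping fields are hypothetical, not part of
this patch:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct example_info {
	spinlock_t lock;
	pgoff_t last_dirty;		/* hypothetical dirty-page bookkeeping */
	unsigned long dirty_count;
};

static int example_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct example_info *info = vma->vm_private_data;

	/*
	 * Beyond the page itself (vmf->page), the handler can now see the
	 * faulting user address, the file offset and the fault flags
	 * (FAULT_FLAG_WRITE, plus the FAULT_FLAG_MKWRITE bit added here).
	 */
	pr_debug("mkwrite at %p, pgoff %lu, flags %#x\n",
		 vmf->virtual_address, (unsigned long)vmf->pgoff, vmf->flags);

	spin_lock(&info->lock);
	info->last_dirty = vmf->pgoff;
	info->dirty_count++;
	spin_unlock(&info->lock);

	return 0;	/* success: the VM goes on to make the pte writable */
}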