author     Linus Torvalds <torvalds@linux-foundation.org>    2012-08-01 10:26:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-08-01 10:26:23 -0700
commit     a0e881b7c189fa2bd76c024dbff91e79511c971d
tree       0c801918565b08921d21aceee5b326f64d998f5f /mm
parent     eff0d13f3823f35d70228cd151d2a2c89288ff32
parent     dbc6e0222d79e78925fe20733844a796a4b72cf9
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull second vfs pile from Al Viro:
"The stuff in there: fsfreeze deadlock fixes by Jan (essentially, the
deadlock reproduced by xfstests 068), symlink and hardlink restriction
patches, plus assorted cleanups and fixes.
Note that another fsfreeze deadlock (emergency thaw one) is *not*
dealt with - the series by Fernando conflicts a lot with Jan's, breaks
userland ABI (FIFREEZE semantics gets changed) and trades the deadlock
for massive vfsmount leak; this is going to be handled next cycle.
There probably will be another pull request, but that stuff won't be
in it."
Fix up trivial conflicts due to unrelated changes next to each other in
drivers/{staging/gdm72xx/usb_boot.c, usb/gadget/storage_common.c}
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (54 commits)
delousing target_core_file a bit
Documentation: Correct s_umount state for freeze_fs/unfreeze_fs
fs: Remove old freezing mechanism
ext2: Implement freezing
btrfs: Convert to new freezing mechanism
nilfs2: Convert to new freezing mechanism
ntfs: Convert to new freezing mechanism
fuse: Convert to new freezing mechanism
gfs2: Convert to new freezing mechanism
ocfs2: Convert to new freezing mechanism
xfs: Convert to new freezing code
ext4: Convert to new freezing mechanism
fs: Protect write paths by sb_start_write - sb_end_write
fs: Skip atime update on frozen filesystem
fs: Add freezing handling to mnt_want_write() / mnt_drop_write()
fs: Improve filesystem freezing handling
switch the protection of percpu_counter list to spinlock
nfsd: Push mnt_want_write() outside of i_mutex
btrfs: Push mnt_want_write() outside of i_mutex
fat: Push mnt_want_write() outside of i_mutex
...
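
Several of the entries above change where write protection is taken: "fs: Add freezing handling to mnt_want_write() / mnt_drop_write()" folds the new freeze protection into mnt_want_write(), and the nfsd/btrfs/fat patches move that call outside of i_mutex so a freeze cannot deadlock against an inode lock held by a writer. A minimal sketch of the resulting ordering, using a hypothetical myfs_ioctl_setflags() helper that is not part of this merge:

#include <linux/fs.h>
#include <linux/mount.h>

/*
 * Sketch only: myfs_ioctl_setflags() is hypothetical.  It shows the
 * lock ordering the series establishes - write/freeze protection via
 * mnt_want_write_file() is taken before i_mutex and dropped after it.
 */
static int myfs_ioctl_setflags(struct file *file, unsigned int flags)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int ret;

	ret = mnt_want_write_file(file);	/* may block while the fs is frozen */
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	inode->i_flags = flags;			/* stand-in for the real flag update */
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);

	mnt_drop_write_file(file);
	return 0;
}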
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c      | 31
-rw-r--r--   mm/filemap_xip.c  |  6
-rw-r--r--   mm/memory.c       | 14
3 files changed, 40 insertions(+), 11 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index a4a5260b027..fa5ca304148 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1712,8 +1712,35 @@ page_not_uptodate:
 }
 EXPORT_SYMBOL(filemap_fault);
 
+int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct page *page = vmf->page;
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+	int ret = VM_FAULT_LOCKED;
+
+	sb_start_pagefault(inode->i_sb);
+	file_update_time(vma->vm_file);
+	lock_page(page);
+	if (page->mapping != inode->i_mapping) {
+		unlock_page(page);
+		ret = VM_FAULT_NOPAGE;
+		goto out;
+	}
+	/*
+	 * We mark the page dirty already here so that when freeze is in
+	 * progress, we are guaranteed that writeback during freezing will
+	 * see the dirty page and writeprotect it again.
+	 */
+	set_page_dirty(page);
+out:
+	sb_end_pagefault(inode->i_sb);
+	return ret;
+}
+EXPORT_SYMBOL(filemap_page_mkwrite);
+
 const struct vm_operations_struct generic_file_vm_ops = {
 	.fault		= filemap_fault,
+	.page_mkwrite	= filemap_page_mkwrite,
 };
 
 /* This is used for a general mmap of a disk file */
@@ -2407,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	count = ocount;
 	pos = *ppos;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 	written = 0;
@@ -2507,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 	BUG_ON(iocb->ki_pos != pos);
 
+	sb_start_write(inode->i_sb);
 	mutex_lock(&inode->i_mutex);
 	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
@@ -2520,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			ret = err;
 	}
 	blk_finish_plug(&plug);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 213ca1f5340..13e013b1270 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -304,6 +304,7 @@ out:
 
 static const struct vm_operations_struct xip_file_vm_ops = {
 	.fault	= xip_file_fault,
+	.page_mkwrite = filemap_page_mkwrite,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -401,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	loff_t pos;
 	ssize_t ret;
 
+	sb_start_write(inode->i_sb);
+
 	mutex_lock(&inode->i_mutex);
 
 	if (!access_ok(VERIFY_READ, buf, len)) {
@@ -411,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	pos = *ppos;
 	count = len;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 
@@ -436,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	current->backing_dev_info = NULL;
  out_up:
 	mutex_unlock(&inode->i_mutex);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xip_file_write);
diff --git a/mm/memory.c b/mm/memory.c
index 482f089765f..57361708d1a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2650,6 +2650,9 @@ reuse:
 		if (!page_mkwrite) {
 			wait_on_page_locked(dirty_page);
 			set_page_dirty_balance(dirty_page, page_mkwrite);
+			/* file_update_time outside page_lock */
+			if (vma->vm_file)
+				file_update_time(vma->vm_file);
 		}
 		put_page(dirty_page);
 		if (page_mkwrite) {
@@ -2667,10 +2670,6 @@ reuse:
 		}
 	}
 
-	/* file_update_time outside page_lock */
-	if (vma->vm_file)
-		file_update_time(vma->vm_file);
-
 	return ret;
 }
 
@@ -3339,12 +3338,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (dirty_page) {
 		struct address_space *mapping = page->mapping;
+		int dirtied = 0;
 
 		if (set_page_dirty(dirty_page))
-			page_mkwrite = 1;
+			dirtied = 1;
 		unlock_page(dirty_page);
 		put_page(dirty_page);
-		if (page_mkwrite && mapping) {
+		if ((dirtied || page_mkwrite) && mapping) {
 			/*
 			 * Some device drivers do not set page.mapping but still
 			 * dirty their pages
@@ -3353,7 +3353,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		/* file_update_time outside page_lock */
-		if (vma->vm_file)
+		if (vma->vm_file && !page_mkwrite)
 			file_update_time(vma->vm_file);
 	} else {
 		unlock_page(vmf.page);
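
Since filemap_page_mkwrite() above is exported and wired into generic_file_vm_ops, filesystems that use generic_file_mmap() pick up the new page-fault protection automatically, and the xip code reuses it directly. A filesystem that needs its own work in the fault path would be expected to follow the same shape; the sketch below uses a hypothetical myfs_page_mkwrite() and is not code from this merge:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch only: myfs_page_mkwrite() is hypothetical.  It mirrors
 * filemap_page_mkwrite() above - the page-fault freeze protection is
 * held across file_update_time() and the dirtying of the page, so
 * writeback during a freeze still sees and writeprotects the page.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * A real filesystem would reserve or allocate the blocks backing
	 * the page here, before marking it dirty.
	 */
	set_page_dirty(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};

As in the xip hunk, the only wiring needed is the .page_mkwrite entry in the filesystem's vm_operations_struct.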