-rw-r--r--	fs/btrfs/btrfs_inode.h	 3
-rw-r--r--	fs/btrfs/file.c	 8
-rw-r--r--	fs/btrfs/inode.c	11
3 files changed, 21 insertions, 1 deletion
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 2411baf3522..2a8c242bc4f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -91,6 +91,9 @@ struct btrfs_inode {
unsigned long runtime_flags;
+ /* Keep track of who's O_SYNC/fsyncing currently */
+ atomic_t sync_writers;
+
/* full 64 bit generation number, struct vfs_inode doesn't have a big
* enough field for this.
*/
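
The new field is a per-inode count of writers currently doing O_SYNC/O_DSYNC writes or fsync; while it is non-zero, the submit path checksums data inline instead of punting the work to helper threads. Below is a minimal userspace sketch of that pattern, assuming C11 atomics; the names (demo_inode, sync_write_begin, sync_write_end, want_async_csum) are hypothetical and not btrfs APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the inode: just the sync-writer count. */
struct demo_inode {
	atomic_int sync_writers;
};

/* Raise the count while an O_SYNC/fsyncing writer is active. */
static void sync_write_begin(struct demo_inode *inode)
{
	atomic_fetch_add(&inode->sync_writers, 1);
}

static void sync_write_end(struct demo_inode *inode)
{
	atomic_fetch_sub(&inode->sync_writers, 1);
}

/* Offloading csum work to worker threads only pays off when nobody is
 * waiting synchronously on this inode's I/O. */
static bool want_async_csum(struct demo_inode *inode)
{
	return atomic_load(&inode->sync_writers) == 0;
}

int main(void)
{
	struct demo_inode inode = { .sync_writers = 0 };

	printf("async csum with no sync writers: %d\n", want_async_csum(&inode));
	sync_write_begin(&inode);
	printf("async csum with one sync writer: %d\n", want_async_csum(&inode));
	sync_write_end(&inode);
	return 0;
}
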
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 71c2dc1ea15..7f4654a1520 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1472,6 +1472,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
ssize_t num_written = 0;
ssize_t err = 0;
size_t count, ocount;
+ bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
sb_start_write(inode->i_sb);
@@ -1529,6 +1530,9 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
}
}
+ if (sync)
+ atomic_inc(&BTRFS_I(inode)->sync_writers);
+
if (unlikely(file->f_flags & O_DIRECT)) {
num_written = __btrfs_direct_write(iocb, iov, nr_segs,
pos, ppos, count, ocount);
@@ -1563,6 +1567,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
num_written = err;
}
out:
+ if (sync)
+ atomic_dec(&BTRFS_I(inode)->sync_writers);
sb_end_write(inode->i_sb);
current->backing_dev_info = NULL;
return num_written ? num_written : err;
@@ -1613,7 +1619,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* out of the ->i_mutex. If so, multiple tasks can flush the dirty
* pages concurrently, which improves performance.
*/
+ atomic_inc(&BTRFS_I(inode)->sync_writers);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ atomic_dec(&BTRFS_I(inode)->sync_writers);
if (ret)
return ret;
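
In both paths above the counter simply brackets the work: btrfs_file_aio_write() raises it before the write and drops it at the common out: label, and btrfs_sync_file() raises it around the page flush so writeback issued on behalf of fsync is also treated as synchronous. A rough self-contained sketch of that bracketing, assuming hypothetical helpers (flush_range, demo_fsync) rather than kernel APIs:

#include <stdatomic.h>

static atomic_int sync_writers;

/* Stand-in for filemap_write_and_wait_range(): pretend writeback of the
 * byte range succeeded. */
static int flush_range(long start, long end)
{
	(void)start;
	(void)end;
	return 0;
}

static int demo_fsync(long start, long end)
{
	int ret;

	/* Anyone submitting I/O for this range now sees a sync writer and
	 * checksums inline instead of deferring to worker threads. */
	atomic_fetch_add(&sync_writers, 1);
	ret = flush_range(start, end);
	atomic_fetch_sub(&sync_writers, 1);

	return ret;
}

int main(void)
{
	return demo_fsync(0, 4096);
}
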
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 123815f3b45..7855aac3670 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1622,6 +1622,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int ret = 0;
int skip_sum;
int metadata = 0;
+ int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -1644,7 +1645,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
goto out;
}
goto mapit;
- } else if (!skip_sum) {
+ } else if (async && !skip_sum) {
/* csum items have already been cloned */
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
@@ -1655,6 +1656,10 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
__btrfs_submit_bio_start,
__btrfs_submit_bio_done);
goto out;
+ } else if (!skip_sum) {
+ ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+ if (ret)
+ goto out;
}
mapit:
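
With the change above, a data write whose inode has active sync writers no longer queues through the async csum workers; its checksum is computed inline (btrfs_csum_one_bio) and the bio falls straight through to mapit:. A sketch of just that branch structure, assuming hypothetical stand-ins (demo_bio, queue_async_csum, csum_inline, map_bio) rather than btrfs functions:

#include <stdatomic.h>
#include <stdbool.h>

struct demo_bio { int sector; };

static atomic_int sync_writers;

static int queue_async_csum(struct demo_bio *bio) { (void)bio; return 0; }
static int csum_inline(struct demo_bio *bio)      { (void)bio; return 0; }
static int map_bio(struct demo_bio *bio)          { (void)bio; return 0; }

static int submit_data_bio(struct demo_bio *bio, bool skip_sum)
{
	bool async = atomic_load(&sync_writers) == 0;
	int ret;

	/* Nobody is waiting synchronously: let a worker thread checksum
	 * the bio and submit it later. */
	if (async && !skip_sum)
		return queue_async_csum(bio);

	/* A sync writer is waiting: checksum in this context, then map
	 * the bio immediately (or skip csums entirely if asked to). */
	if (!skip_sum) {
		ret = csum_inline(bio);
		if (ret)
			return ret;
	}
	return map_bio(bio);
}

int main(void)
{
	struct demo_bio bio = { 0 };

	return submit_data_bio(&bio, false);
}
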
@@ -6333,6 +6338,9 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
+ if (async_submit)
+ async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
+
bio_get(bio);
if (!write) {
@@ -7113,6 +7121,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
ei->io_tree.track_uptodate = 1;
ei->io_failure_tree.track_uptodate = 1;
+ atomic_set(&ei->sync_writers, 0);
mutex_init(&ei->log_mutex);
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);