Diffstat (limited to 'fs')
-rw-r--r--  fs/autofs4/expire.c  36
-rw-r--r--  fs/bio.c  13
-rw-r--r--  fs/block_dev.c  3
-rw-r--r--  fs/btrfs/backref.c  4
-rw-r--r--  fs/btrfs/compression.c  1
-rw-r--r--  fs/btrfs/ctree.c  9
-rw-r--r--  fs/btrfs/ctree.h  3
-rw-r--r--  fs/btrfs/delayed-inode.c  12
-rw-r--r--  fs/btrfs/delayed-ref.c  163
-rw-r--r--  fs/btrfs/delayed-ref.h  4
-rw-r--r--  fs/btrfs/disk-io.c  53
-rw-r--r--  fs/btrfs/disk-io.h  2
-rw-r--r--  fs/btrfs/extent-tree.c  123
-rw-r--r--  fs/btrfs/extent_io.c  17
-rw-r--r--  fs/btrfs/file-item.c  4
-rw-r--r--  fs/btrfs/inode.c  329
-rw-r--r--  fs/btrfs/ioctl.c  7
-rw-r--r--  fs/btrfs/locking.c  2
-rw-r--r--  fs/btrfs/ordered-data.c  2
-rw-r--r--  fs/btrfs/qgroup.c  12
-rw-r--r--  fs/btrfs/root-tree.c  4
-rw-r--r--  fs/btrfs/super.c  19
-rw-r--r--  fs/btrfs/transaction.c  3
-rw-r--r--  fs/btrfs/volumes.c  37
-rw-r--r--  fs/btrfs/volumes.h  2
-rw-r--r--  fs/buffer.c  66
-rw-r--r--  fs/ceph/debugfs.c  1
-rw-r--r--  fs/ceph/inode.c  15
-rw-r--r--  fs/ceph/ioctl.c  3
-rw-r--r--  fs/cifs/cifssmb.c  11
-rw-r--r--  fs/cifs/dir.c  9
-rw-r--r--  fs/cifs/file.c  2
-rw-r--r--  fs/cifs/inode.c  24
-rw-r--r--  fs/cifs/link.c  2
-rw-r--r--  fs/cifs/smb2misc.c  16
-rw-r--r--  fs/cifs/smb2pdu.h  10
-rw-r--r--  fs/cifs/transport.c  9
-rw-r--r--  fs/compat.c  10
-rw-r--r--  fs/direct-io.c  5
-rw-r--r--  fs/ecryptfs/file.c  10
-rw-r--r--  fs/ecryptfs/inode.c  5
-rw-r--r--  fs/ecryptfs/main.c  1
-rw-r--r--  fs/eventpoll.c  2
-rw-r--r--  fs/exofs/inode.c  27
-rw-r--r--  fs/exofs/ore.c  14
-rw-r--r--  fs/exofs/super.c  11
-rw-r--r--  fs/ext3/inode.c  25
-rw-r--r--  fs/ext3/super.c  11
-rw-r--r--  fs/ext4/balloc.c  62
-rw-r--r--  fs/ext4/bitmap.c  1
-rw-r--r--  fs/ext4/extents.c  1
-rw-r--r--  fs/ext4/inode.c  10
-rw-r--r--  fs/ext4/super.c  17
-rw-r--r--  fs/fuse/control.c  4
-rw-r--r--  fs/fuse/cuse.c  4
-rw-r--r--  fs/fuse/dev.c  1
-rw-r--r--  fs/fuse/dir.c  3
-rw-r--r--  fs/fuse/file.c  15
-rw-r--r--  fs/fuse/fuse_i.h  3
-rw-r--r--  fs/fuse/inode.c  44
-rw-r--r--  fs/gfs2/meta_io.c  2
-rw-r--r--  fs/hfs/mdb.c  4
-rw-r--r--  fs/jbd/journal.c  9
-rw-r--r--  fs/jbd2/journal.c  7
-rw-r--r--  fs/logfs/dev_bdev.c  15
-rw-r--r--  fs/logfs/inode.c  18
-rw-r--r--  fs/logfs/journal.c  2
-rw-r--r--  fs/logfs/readwrite.c  1
-rw-r--r--  fs/logfs/segment.c  2
-rw-r--r--  fs/namei.c  10
-rw-r--r--  fs/nfs/Makefile  18
-rw-r--r--  fs/nfs/client.c  2
-rw-r--r--  fs/nfs/file.c  4
-rw-r--r--  fs/nfs/idmap.c  62
-rw-r--r--  fs/nfs/inode.c  2
-rw-r--r--  fs/nfs/nfs3proc.c  4
-rw-r--r--  fs/nfs/nfs4_fs.h  3
-rw-r--r--  fs/nfs/nfs4client.c  2
-rw-r--r--  fs/nfs/nfs4file.c  4
-rw-r--r--  fs/nfs/nfs4proc.c  127
-rw-r--r--  fs/nfs/nfs4super.c  15
-rw-r--r--  fs/nfs/nfs4xdr.c  41
-rw-r--r--  fs/nfs/objlayout/objio_osd.c  55
-rw-r--r--  fs/nfs/pagelist.c  2
-rw-r--r--  fs/nfs/pnfs.c  39
-rw-r--r--  fs/nfs/pnfs.h  2
-rw-r--r--  fs/nfs/super.c  41
-rw-r--r--  fs/nfs/write.c  15
-rw-r--r--  fs/nfsd/nfs4callback.c  4
-rw-r--r--  fs/nfsd/state.h  1
-rw-r--r--  fs/nilfs2/super.c  4
-rw-r--r--  fs/nilfs2/the_nilfs.h  2
-rw-r--r--  fs/open.c  9
-rw-r--r--  fs/quota/dquot.c  2
-rw-r--r--  fs/reiserfs/bitmap.c  2
-rw-r--r--  fs/reiserfs/inode.c  2
-rw-r--r--  fs/stat.c  2
-rw-r--r--  fs/super.c  40
-rw-r--r--  fs/ubifs/debug.h  2
-rw-r--r--  fs/ubifs/file.c  10
-rw-r--r--  fs/ubifs/lpt.c  5
-rw-r--r--  fs/ubifs/recovery.c  2
-rw-r--r--  fs/ubifs/replay.c  3
-rw-r--r--  fs/ubifs/super.c  5
-rw-r--r--  fs/udf/file.c  35
-rw-r--r--  fs/udf/inode.c  5
-rw-r--r--  fs/udf/super.c  7
-rw-r--r--  fs/xfs/xfs_discard.c  6
-rw-r--r--  fs/xfs/xfs_ialloc.c  17
-rw-r--r--  fs/xfs/xfs_rtalloc.c  2
110 files changed, 1104 insertions, 889 deletions
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68ecef9..842d00048a6 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -94,25 +94,21 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
{
struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
struct list_head *next;
- struct dentry *p, *q;
+ struct dentry *q;
spin_lock(&sbi->lookup_lock);
+ spin_lock(&root->d_lock);
- if (prev == NULL) {
- spin_lock(&root->d_lock);
+ if (prev)
+ next = prev->d_u.d_child.next;
+ else {
prev = dget_dlock(root);
next = prev->d_subdirs.next;
- p = prev;
- goto start;
}
- p = prev;
- spin_lock(&p->d_lock);
-again:
- next = p->d_u.d_child.next;
-start:
+cont:
if (next == &root->d_subdirs) {
- spin_unlock(&p->d_lock);
+ spin_unlock(&root->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
@@ -121,16 +117,15 @@ start:
q = list_entry(next, struct dentry, d_u.d_child);
spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
- /* Negative dentry - try next */
- if (!simple_positive(q)) {
- spin_unlock(&p->d_lock);
- lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
- p = q;
- goto again;
+ /* Already gone or negative dentry (under construction) - try next */
+ if (q->d_count == 0 || !simple_positive(q)) {
+ spin_unlock(&q->d_lock);
+ next = q->d_u.d_child.next;
+ goto cont;
}
dget_dlock(q);
spin_unlock(&q->d_lock);
- spin_unlock(&p->d_lock);
+ spin_unlock(&root->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
@@ -404,11 +399,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
DPRINTK("checking mountpoint %p %.*s",
dentry, (int)dentry->d_name.len, dentry->d_name.name);
- /* Path walk currently on this dentry? */
- ino_count = atomic_read(&ino->count) + 2;
- if (dentry->d_count > ino_count)
- goto next;
-
/* Can we umount this guy */
if (autofs4_mount_busy(mnt, dentry))
goto next;
diff --git a/fs/bio.c b/fs/bio.c
index 73922abba83..71072ab9912 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
unsigned int sz = sizeof(struct bio) + extra_size;
struct kmem_cache *slab = NULL;
- struct bio_slab *bslab;
+ struct bio_slab *bslab, *new_bio_slabs;
unsigned int i, entry = -1;
mutex_lock(&bio_slab_lock);
@@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
if (bio_slab_nr == bio_slab_max && entry == -1) {
bio_slab_max <<= 1;
- bio_slabs = krealloc(bio_slabs,
- bio_slab_max * sizeof(struct bio_slab),
- GFP_KERNEL);
- if (!bio_slabs)
+ new_bio_slabs = krealloc(bio_slabs,
+ bio_slab_max * sizeof(struct bio_slab),
+ GFP_KERNEL);
+ if (!new_bio_slabs)
goto out_unlock;
+ bio_slabs = new_bio_slabs;
}
if (entry == -1)
entry = bio_slab_nr++;
@@ -1312,7 +1313,7 @@ EXPORT_SYMBOL(bio_copy_kern);
* Note that this code is very hard to test under normal circumstances because
* direct-io pins the pages with get_user_pages(). This makes
* is_page_cache_freeable return false, and the VM will not clean the pages.
- * But other code (eg, pdflush) could clean the pages if they are mapped
+ * But other code (eg, flusher threads) could clean the pages if they are mapped
* pagecache.
*
* Simply disabling the call to bio_set_pages_dirty() is a good way to test the
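The hunk above fixes a classic realloc leak: assigning krealloc()'s result
straight to the only pointer that tracks the allocation loses the original
buffer when the call fails. A minimal userspace sketch of the same pattern
with realloc() (the names here are illustrative, not kernel API):

    #include <stdlib.h>

    /* Grow an array without leaking it when realloc() fails.
     * Mirrors the bio_slabs fix: write through a temporary and
     * commit the new pointer only on success. */
    static int grow_array(int **arr, size_t *cap)
    {
            size_t new_cap = *cap ? *cap * 2 : 8;
            int *tmp = realloc(*arr, new_cap * sizeof(**arr));

            if (!tmp)
                    return -1;      /* *arr is still valid and owned */
            *arr = tmp;             /* safe to overwrite now */
            *cap = new_cap;
            return 0;
    }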
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1e519195d45..38e721b35d4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
+ blk_start_plug(&plug);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
@@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0 && ret > 0)
ret = err;
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_aio_write);
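The blkdev_aio_write() change brackets the write with a block plug, so bios
issued while the plug is held are queued per-task and pushed to the device
as a batch when the plug is released. The idiom, sketched in kernel C
(submit_lots_of_io() is a hypothetical placeholder for the plugged work):

    struct blk_plug plug;

    blk_start_plug(&plug);          /* start batching this task's bios */
    submit_lots_of_io();            /* hypothetical: issue many bios */
    blk_finish_plug(&plug);         /* flush the whole batch downward */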
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index a256f3b2a84..ff6475f409d 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
ret = extent_from_logical(fs_info, logical, path,
&found_key);
btrfs_release_path(path);
- if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
- ret = -EINVAL;
if (ret < 0)
return ret;
+ if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ return -EINVAL;
extent_item_pos = logical - found_key.objectid;
ret = iterate_extent_inodes(fs_info, found_key.objectid,
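The backref.c reorder matters because extent_from_logical() returns either a
negative errno or a set of extent flags; testing a flag bit before checking
for a negative value can misread an errno as a flag. A userspace analogue of
the safe ordering (FLAG_TREE_BLOCK and its value are illustrative):

    #include <errno.h>

    #define FLAG_TREE_BLOCK 0x2     /* illustrative flag bit */

    static int handle_result(int ret)
    {
            if (ret < 0)            /* errno first: flag bits are only */
                    return ret;     /* meaningful on success */
            if (ret & FLAG_TREE_BLOCK)
                    return -EINVAL; /* tree blocks not wanted here */
            return 0;
    }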
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 86eff48dab7..43d1c5a3a03 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace)
btrfs_compress_op[idx]->free_workspace(workspace);
atomic_dec(alloc_workspace);
wake:
+ smp_mb();
if (waitqueue_active(workspace_wait))
wake_up(workspace_wait);
}
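The smp_mb() added above is the standard pairing for waitqueue_active(): the
waker must make its state update globally visible before it peeks at the
wait queue, or a task that is just going to sleep can miss the wake-up. A
sketch of the waker side as patched (the sleeper's matching barrier comes
from prepare_to_wait() inside wait_event()):

    atomic_dec(alloc_workspace);            /* publish the state change */
    smp_mb();                               /* order the store before the check */
    if (waitqueue_active(workspace_wait))   /* cheap: any sleepers at all? */
            wake_up(workspace_wait);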
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9d7621f271f..6d183f60d63 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -421,12 +421,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
spin_unlock(&fs_info->tree_mod_seq_lock);
/*
- * we removed the lowest blocker from the blocker list, so there may be
- * more processible delayed refs.
- */
- wake_up(&fs_info->tree_mod_seq_wait);
-
- /*
* anything that's lower than the lowest existing (read: blocked)
* sequence number can be removed from the tree.
*/
@@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
u32 nritems;
int ret;
+ if (btrfs_header_level(eb) == 0)
+ return;
+
nritems = btrfs_header_nritems(eb);
for (i = nritems - 1; i >= 0; i--) {
ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4bab807227a..0d195b50766 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1252,7 +1252,6 @@ struct btrfs_fs_info {
atomic_t tree_mod_seq;
struct list_head tree_mod_seq_list;
struct seq_list tree_mod_seq_elem;
- wait_queue_head_t tree_mod_seq_wait;
/* this protects tree_mod_log */
rwlock_t tree_mod_log_lock;
@@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 logical_offset, u32 *dst);
+ struct bio *bio, u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 335605c8cea..07d5eeb1e6f 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
rb_erase(&delayed_item->rb_node, root);
delayed_item->delayed_node->count--;
- atomic_dec(&delayed_root->items);
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+ if (atomic_dec_return(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND &&
waitqueue_active(&delayed_root->wait))
wake_up(&delayed_root->wait);
}
@@ -1028,9 +1028,10 @@ do_again:
btrfs_release_delayed_item(prev);
ret = 0;
btrfs_release_path(path);
- if (curr)
+ if (curr) {
+ mutex_unlock(&node->mutex);
goto do_again;
- else
+ } else
goto delete_fail;
}
@@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
delayed_node->count--;
delayed_root = delayed_node->root->fs_info->delayed_root;
- atomic_dec(&delayed_root->items);
- if (atomic_read(&delayed_root->items) <
+ if (atomic_dec_return(&delayed_root->items) <
BTRFS_DELAYED_BACKGROUND &&
waitqueue_active(&delayed_root->wait))
wake_up(&delayed_root->wait);
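Both delayed-inode.c hunks replace an atomic_dec()/atomic_read() pair with
atomic_dec_return(), so the threshold test sees the value this decrement
produced rather than whatever other CPUs did in the window between the two
calls. A runnable C11 analogue of the combined operation:

    #include <stdatomic.h>

    #define BACKGROUND_THRESHOLD 2048       /* illustrative limit */

    /* Returns nonzero if the value produced by this decrement is
     * below the threshold, as atomic_dec_return() is used above. */
    static int dec_and_check(atomic_int *items)
    {
            /* fetch_sub returns the old value; old - 1 is the value
             * our own decrement produced. */
            return atomic_fetch_sub(items, 1) - 1 < BACKGROUND_THRESHOLD;
    }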
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index da7419ed01b..ae941177339 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -38,17 +38,14 @@
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
struct btrfs_delayed_tree_ref *ref1)
{
- if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- } else {
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
- }
+ if (ref1->root < ref2->root)
+ return -1;
+ if (ref1->root > ref2->root)
+ return 1;
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
return 0;
}
@@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
* type of the delayed backrefs and content of delayed backrefs.
*/
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
- struct btrfs_delayed_ref_node *ref1)
+ struct btrfs_delayed_ref_node *ref1,
+ bool compare_seq)
{
if (ref1->bytenr < ref2->bytenr)
return -1;
@@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
if (ref1->type > ref2->type)
return 1;
/* merging of sequenced refs is not allowed */
- if (ref1->seq < ref2->seq)
- return -1;
- if (ref1->seq > ref2->seq)
- return 1;
+ if (compare_seq) {
+ if (ref1->seq < ref2->seq)
+ return -1;
+ if (ref1->seq > ref2->seq)
+ return 1;
+ }
if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
rb_node);
- cmp = comp_entry(entry, ins);
+ cmp = comp_entry(entry, ins, 1);
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
@@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
return 0;
}
+static void inline drop_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_node *ref)
+{
+ rb_erase(&ref->rb_node, &delayed_refs->root);
+ ref->in_tree = 0;
+ btrfs_put_delayed_ref(ref);
+ delayed_refs->num_entries--;
+ if (trans->delayed_ref_updates)
+ trans->delayed_ref_updates--;
+}
+
+static int merge_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_node *ref, u64 seq)
+{
+ struct rb_node *node;
+ int merged = 0;
+ int mod = 0;
+ int done = 0;
+
+ node = rb_prev(&ref->rb_node);
+ while (node) {
+ struct btrfs_delayed_ref_node *next;
+
+ next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ node = rb_prev(node);
+ if (next->bytenr != ref->bytenr)
+ break;
+ if (seq && next->seq >= seq)
+ break;
+ if (comp_entry(ref, next, 0))
+ continue;
+
+ if (ref->action == next->action) {
+ mod = next->ref_mod;
+ } else {
+ if (ref->ref_mod < next->ref_mod) {
+ struct btrfs_delayed_ref_node *tmp;
+
+ tmp = ref;
+ ref = next;
+ next = tmp;
+ done = 1;
+ }
+ mod = -next->ref_mod;
+ }
+
+ merged++;
+ drop_delayed_ref(trans, delayed_refs, next);
+ ref->ref_mod += mod;
+ if (ref->ref_mod == 0) {
+ drop_delayed_ref(trans, delayed_refs, ref);
+ break;
+ } else {
+ /*
+ * You can't have multiples of the same ref on a tree
+ * block.
+ */
+ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
+ }
+
+ if (done)
+ break;
+ node = rb_prev(&ref->rb_node);
+ }
+
+ return merged;
+}
+
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct rb_node *node;
+ u64 seq = 0;
+
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ if (!list_empty(&fs_info->tree_mod_seq_list)) {
+ struct seq_list *elem;
+
+ elem = list_first_entry(&fs_info->tree_mod_seq_list,
+ struct seq_list, list);
+ seq = elem->seq;
+ }
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
+ node = rb_prev(&head->node.rb_node);
+ while (node) {
+ struct btrfs_delayed_ref_node *ref;
+
+ ref = rb_entry(node, struct btrfs_delayed_ref_node,
+ rb_node);
+ if (ref->bytenr != head->node.bytenr)
+ break;
+
+ /* We can't merge refs that are outside of our seq count */
+ if (seq && ref->seq >= seq)
+ break;
+ if (merge_ref(trans, delayed_refs, ref, seq))
+ node = rb_prev(&head->node.rb_node);
+ else
+ node = rb_prev(node);
+ }
+}
+
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
u64 seq)
@@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans,
* every changing the extent allocation tree.
*/
existing->ref_mod--;
- if (existing->ref_mod == 0) {
- rb_erase(&existing->rb_node,
- &delayed_refs->root);
- existing->in_tree = 0;
- btrfs_put_delayed_ref(existing);
- delayed_refs->num_entries--;
- if (trans->delayed_ref_updates)
- trans->delayed_ref_updates--;
- } else {
+ if (existing->ref_mod == 0)
+ drop_delayed_ref(trans, delayed_refs, existing);
+ else
WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
- }
} else {
WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action,
for_cow);
- if (!need_ref_seq(for_cow, ref_root) &&
- waitqueue_active(&fs_info->tree_mod_seq_wait))
- wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
if (need_ref_seq(for_cow, ref_root))
btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
action, for_cow);
- if (!need_ref_seq(for_cow, ref_root) &&
- waitqueue_active(&fs_info->tree_mod_seq_wait))
- wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
if (need_ref_seq(for_cow, ref_root))
btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data);
- if (waitqueue_active(&fs_info->tree_mod_seq_wait))
- wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
return 0;
}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 0d7c90c366b..ab530059584 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
struct btrfs_delayed_extent_op *extent_op);
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62e0cafd6e2..22e98e04c2e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
ret = read_extent_buffer_pages(io_tree, eb, start,
WAIT_COMPLETE,
btree_get_extent, mirror_num);
- if (!ret && !verify_parent_transid(io_tree, eb,
+ if (!ret) {
+ if (!verify_parent_transid(io_tree, eb,
parent_transid, 0))
- break;
+ break;
+ else
+ ret = -EIO;
+ }
/*
* This buffer's crc is fine, but its contents are corrupted, so
@@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work)
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
- atomic_dec(&fs_info->nr_async_submits);
-
- if (atomic_read(&fs_info->nr_async_submits) < limit &&
+ if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
@@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb,
fs_info->free_chunk_space = 0;
fs_info->tree_mod_log = RB_ROOT;
- init_waitqueue_head(&fs_info->tree_mod_seq_wait);
-
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
spin_lock_init(&fs_info->reada_lock);
@@ -2528,8 +2528,7 @@ retry_root_backup:
goto fail_trans_kthread;
/* do not make disk changes in broken FS */
- if (btrfs_super_log_root(disk_super) != 0 &&
- !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
+ if (btrfs_super_log_root(disk_super) != 0) {
u64 bytenr = btrfs_super_log_root(disk_super);
if (fs_devices->rw_devices == 0) {
@@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root)
/* clear out the rbtree of defraggable inodes */
btrfs_run_defrag_inodes(fs_info);
- /*
- * Here come 2 situations when btrfs is broken to flip readonly:
- *
- * 1. when btrfs flips readonly somewhere else before
- * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
- * and btrfs will skip to write sb directly to keep
- * ERROR state on disk.
- *
- * 2. when btrfs flips readonly just in btrfs_commit_super,
- * and in such case, btrfs cannot write sb via btrfs_commit_super,
- * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
- * btrfs will cleanup all FS resources first and write sb then.
- */
if (!(fs_info->sb->s_flags & MS_RDONLY)) {
ret = btrfs_commit_super(root);
if (ret)
printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
}
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- ret = btrfs_error_commit_super(root);
- if (ret)
- printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
- }
+ if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ btrfs_error_commit_super(root);
btrfs_put_block_group_cache(fs_info);
@@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
if (read_only)
return 0;
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- printk(KERN_WARNING "warning: mount fs with errors, "
- "running btrfsck is recommended\n");
- }
-
return 0;
}
-int btrfs_error_commit_super(struct btrfs_root *root)
+void btrfs_error_commit_super(struct btrfs_root *root)
{
- int ret;
-
mutex_lock(&root->fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root)
/* cleanup FS via transaction */
btrfs_cleanup_transaction(root);
-
- ret = write_ctree_super(NULL, root, 0);
-
- return ret;
}
static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
@@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
/* FIXME: cleanup wait for commit */
t->in_commit = 1;
t->blocked = 1;
+ smp_mb();
if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
wake_up(&root->fs_info->transaction_blocked_wait);
t->blocked = 0;
+ smp_mb();
if (waitqueue_active(&root->fs_info->transaction_wait))
wake_up(&root->fs_info->transaction_wait);
t->commit_done = 1;
+ smp_mb();
if (waitqueue_active(&t->commit_wait))
wake_up(&t->commit_wait);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 95e147eea23..c5b00a735fe 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_commit_super(struct btrfs_root *root);
-int btrfs_error_commit_super(struct btrfs_root *root);
+void btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4e1b153b7c4..ba58024d40d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2252,6 +2252,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
}
/*
+ * We need to try and merge add/drops of the same ref since we
+ * can run into issues with relocate dropping the implicit ref
+ * and then it being added back again before the drop can
+ * finish. If we merged anything we need to re-loop so we can
+ * get a good ref.
+ */
+ btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
+ locked_ref);
+
+ /*
* locked_ref is the head node, so we have to go one
* node back for any delayed ref updates
*/
@@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- /*
- * we modified num_entries, but as we're currently running
- * delayed refs, skip
- * wake_up(&delayed_refs->seq_wait);
- * here.
- */
+ if (locked_ref) {
+ /*
+ * when we play the delayed ref, also correct the
+ * ref_mod on head
+ */
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ locked_ref->node.ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ locked_ref->node.ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
spin_unlock(&delayed_refs->lock);
ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2350,22 +2371,6 @@ next:
return count;
}
-static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_root *delayed_refs,
- unsigned long num_refs,
- struct list_head *first_seq)
-{
- spin_unlock(&delayed_refs->lock);
- pr_debug("waiting for more refs (num %ld, first %p)\n",
- num_refs, first_seq);
- wait_event(fs_info->tree_mod_seq_wait,
- num_refs != delayed_refs->num_entries ||
- fs_info->tree_mod_seq_list.next != first_seq);
- pr_debug("done waiting for more refs (num %ld, first %p)\n",
- delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
- spin_lock(&delayed_refs->lock);
-}
-
#ifdef SCRAMBLE_DELAYED_REFS
/*
* Normally delayed refs get processed in ascending bytenr order. This
@@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct list_head cluster;
- struct list_head *first_seq = NULL;
int ret;
u64 delayed_start;
int run_all = count == (unsigned long)-1;
int run_most = 0;
- unsigned long num_refs = 0;
- int consider_waiting;
+ int loops;
/* We'll clean this up in btrfs_cleanup_transaction */
if (trans->aborted)
@@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
again:
- consider_waiting = 0;
+ loops = 0;
spin_lock(&delayed_refs->lock);
#ifdef SCRAMBLE_DELAYED_REFS
@@ -2512,31 +2515,6 @@ again:
if (ret)
break;
- if (delayed_start >= delayed_refs->run_delayed_start) {
- if (consider_waiting == 0) {
- /*
- * btrfs_find_ref_cluster looped. let's do one
- * more cycle. if we don't run any delayed ref
- * during that cycle (because we can't because
- * all of them are blocked) and if the number of
- * refs doesn't change, we avoid busy waiting.
- */
- consider_waiting = 1;
- num_refs = delayed_refs->num_entries;
- first_seq = root->fs_info->tree_mod_seq_list.next;
- } else {
- wait_for_more_refs(root->fs_info, delayed_refs,
- num_refs, first_seq);
- /*
- * after waiting, things have changed. we
- * dropped the lock and someone else might have
- * run some refs, built new clusters and so on.
- * therefore, we restart staleness detection.
- */
- consider_waiting = 0;
- }
- }
-
ret = run_clustered_refs(trans, root, &cluster);
if (ret < 0) {
spin_unlock(&delayed_refs->lock);
@@ -2549,9 +2527,26 @@ again:
if (count == 0)
break;
- if (ret || delayed_refs->run_delayed_start == 0) {
+ if (delayed_start >= delayed_refs->run_delayed_start) {
+ if (loops == 0) {
+ /*
+ * btrfs_find_ref_cluster looped. let's do one
+ * more cycle. if we don't run any delayed ref
+ * during that cycle (because we can't because
+ * all of them are blocked), bail out.
+ */
+ loops = 1;
+ } else {
+ /*
+ * no runnable refs left, stop trying
+ */
+ BUG_ON(run_all);
+ break;
+ }
+ }
+ if (ret) {
/* refs were run, let's reset staleness detection */
- consider_waiting = 0;
+ loops = 0;
}
}
@@ -3007,17 +3002,16 @@ again:
}
spin_unlock(&block_group->lock);
- num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
+ /*
+ * Try to preallocate enough space based on how big the block group is.
+ * Keep in mind this has to include any pinned space which could end up
+ * taking up quite a bit since it's not folded into the other space
+ * cache.
+ */
+ num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
if (!num_pages)
num_pages = 1;
- /*
- * Just to make absolutely sure we have enough space, we're going to
- * preallocate 12 pages worth of space for each block group. In
- * practice we ought to use at most 8, but we need extra space so we can
- * add our header and have a terminator between the extents and the
- * bitmaps.
- */
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
@@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (root->fs_info->quota_enabled) {
ret = btrfs_qgroup_reserve(root, num_bytes +
nr_extents * root->leafsize);
- if (ret)
+ if (ret) {
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
return ret;
+ }
}
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
rb_erase(&head->node.rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- smp_mb();
- if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
- wake_up(&root->fs_info->tree_mod_seq_wait);
/*
* we don't take a ref on the node because we're removing it from the
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 45c81bb4ac8..4c878476bb9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
state, mirror);
- if (ret) {
- /* no IO indicated but software detected errors
- * in the block, either checksum errors or
- * issues with the contents */
- struct btrfs_root *root =
- BTRFS_I(page->mapping->host)->root;
- struct btrfs_device *device;
-
+ if (ret)
uptodate = 0;
- device = btrfs_find_device_for_logical(
- root, start, mirror);
- if (device)
- btrfs_dev_stat_inc_and_print(device,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- } else {
+ else
clean_io_failure(start, page);
- }
}
if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b45b9de0c21..857d93cd01d 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
}
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 offset, u32 *dst)
+ struct bio *bio, u64 offset)
{
- return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1);
+ return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 83baec24946..ec154f95464 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -324,7 +324,8 @@ static noinline int add_async_extent(struct async_cow *cow,
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that pdflush sent them down.
+ * are written in the same order that the flusher thread sent them
+ * down.
*/
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
@@ -1007,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
- atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
-
- if (atomic_read(&root->fs_info->async_delalloc_pages) <
+ if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
5 * 1024 * 1024 &&
waitqueue_active(&root->fs_info->async_submit_wait))
wake_up(&root->fs_info->async_submit_wait);
@@ -1884,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
@@ -3173,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode_inc_iversion(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, root, dir);
+ ret = btrfs_update_inode_fallback(trans, root, dir);
if (ret)
btrfs_abort_transaction(trans, root, ret);
out:
@@ -5773,18 +5775,112 @@ out:
return ret;
}
+static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ struct extent_state **cached_state, int writing)
+{
+ struct btrfs_ordered_extent *ordered;
+ int ret = 0;
+
+ while (1) {
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ 0, cached_state);
+ /*
+ * We're concerned with the entire range that we're going to be
+ * doing DIO to, so we need to make sure there are no ordered
+ * extents in this range.
+ */
+ ordered = btrfs_lookup_ordered_range(inode, lockstart,
+ lockend - lockstart + 1);
+
+ /*
+ * We need to make sure there are no buffered pages in this
+ * range either, we could have raced between the invalidate in
+ * generic_file_direct_write and locking the extent. The
+ * invalidate needs to happen so that reads after a write do not
+ * get stale data.
+ */
+ if (!ordered && (!writing ||
+ !test_range_bit(&BTRFS_I(inode)->io_tree,
+ lockstart, lockend, EXTENT_UPTODATE, 0,
+ *cached_state)))
+ break;
+
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state, GFP_NOFS);
+
+ if (ordered) {
+ btrfs_start_ordered_extent(inode, ordered, 1);
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ /* Screw you mmap */
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ lockstart,
+ lockend);
+ if (ret)
+ break;
+
+ /*
+ * If we found a page that couldn't be invalidated just
+ * fall back to buffered.
+ */
+ ret = invalidate_inode_pages2_range(inode->i_mapping,
+ lockstart >> PAGE_CACHE_SHIFT,
+ lockend >> PAGE_CACHE_SHIFT);
+ if (ret)
+ break;
+ }
+
+ cond_resched();
+ }
+
+ return ret;
+}
+
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_state *cached_state = NULL;
u64 start = iblock << inode->i_blkbits;
+ u64 lockstart, lockend;
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
+ int unlock_bits = EXTENT_LOCKED;
+ int ret;
+
+ if (create) {
+ ret = btrfs_delalloc_reserve_space(inode, len);
+ if (ret)
+ return ret;
+ unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
+ } else {
+ len = min_t(u64, len, root->sectorsize);
+ }
+
+ lockstart = start;
+ lockend = start + len - 1;
+
+ /*
+ * If this errors out it's because we couldn't invalidate pagecache for
+ * this range and we need to fall back to buffered.
+ */
+ if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
+ return -ENOTBLK;
+
+ if (create) {
+ ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, EXTENT_DELALLOC, NULL,
+ &cached_state, GFP_NOFS);
+ if (ret)
+ goto unlock_err;
+ }
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- if (IS_ERR(em))
- return PTR_ERR(em);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto unlock_err;
+ }
/*
* Ok for INLINE and COMPRESSED extents we need to fallback on buffered
@@ -5803,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
em->block_start == EXTENT_MAP_INLINE) {
free_extent_map(em);
- return -ENOTBLK;
+ ret = -ENOTBLK;
+ goto unlock_err;
}
/* Just a good old fashioned hole, return */
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
- /* DIO will do one hole at a time, so just unlock a sector */
- unlock_extent(&BTRFS_I(inode)->io_tree, start,
- start + root->sectorsize - 1);
- return 0;
+ ret = 0;
+ goto unlock_err;
}
/*
@@ -5826,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
*
*/
if (!create) {
- len = em->len - (start - em->start);
- goto map;
+ len = min(len, em->len - (start - em->start));
+ lockstart = start + len;
+ goto unlock;
}
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
@@ -5859,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
btrfs_end_transaction(trans, root);
if (ret) {
free_extent_map(em);
- return ret;
+ goto unlock_err;
}
goto unlock;
}
@@ -5872,14 +5968,12 @@ must_cow:
*/
len = bh_result->b_size;
em = btrfs_new_extent_direct(inode, em, start, len);
- if (IS_ERR(em))
- return PTR_ERR(em);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto unlock_err;
+ }
len = min(len, em->len - (start - em->start));
unlock:
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
- EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
- 0, NULL, GFP_NOFS);
-map:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
@@ -5897,9 +5991,44 @@ map:
i_size_write(inode, start + len);
}
+ /*
+ * In the case of write we need to clear and unlock the entire range,
+ * in the case of read we need to unlock only the end area that we
+ * aren't using if there is any left over space.
+ */
+ if (lockstart < lockend) {
+ if (create && len < lockend - lockstart) {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockstart + len - 1, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ /*
+ * Beside unlock, we also need to cleanup reserved space
+ * for the left range by attaching EXTENT_DO_ACCOUNTING.
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ lockstart + len, lockend,
+ unlock_bits | EXTENT_DO_ACCOUNTING,
+ 1, 0, NULL, GFP_NOFS);
+ } else {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ }
+ } else {
+ free_extent_state(cached_state);
+ }
+
free_extent_map(em);
return 0;
+
+unlock_err:
+ if (create)
+ unlock_bits |= EXTENT_DO_ACCOUNTING;
+
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ unlock_bits, 1, 0, &cached_state, GFP_NOFS);
+ return ret;
}
struct btrfs_dio_private {
@@ -5907,7 +6036,6 @@ struct btrfs_dio_private {
u64 logical_offset;
u64 disk_bytenr;
u64 bytes;
- u32 *csums;
void *private;
/* number of bios pending for this dio */
@@ -5927,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start;
- u32 *private = dip->csums;
start = dip->logical_offset;
do {
@@ -5935,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
struct page *page = bvec->bv_page;
char *kaddr;
u32 csum = ~(u32)0;
+ u64 private = ~(u32)0;
unsigned long flags;
+ if (get_state_private(&BTRFS_I(inode)->io_tree,
+ start, &private))
+ goto failed;
local_irq_save(flags);
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
@@ -5946,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
local_irq_restore(flags);
flush_dcache_page(bvec->bv_page);
- if (csum != *private) {
+ if (csum != private) {
+failed:
printk(KERN_ERR "btrfs csum failed ino %llu off"
" %llu csum %u private %u\n",
(unsigned long long)btrfs_ino(inode),
(unsigned long long)start,
- csum, *private);
+ csum, (unsigned)private);
err = -EIO;
}
}
start += bvec->bv_len;
- private++;
bvec++;
} while (bvec <= bvec_end);
@@ -5965,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
dip->logical_offset + dip->bytes - 1);
bio->bi_private = dip->private;
- kfree(dip->csums);
kfree(dip);
/* If we had a csum failure make sure to clear the uptodate flag */
@@ -6071,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
- u32 *csums, int async_submit)
+ int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -6104,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
if (ret)
goto err;
} else if (!skip_sum) {
- ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
- file_offset, csums);
+ ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
if (ret)
goto err;
}
@@ -6131,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
u64 submit_len = 0;
u64 map_length;
int nr_pages = 0;
- u32 *csums = dip->csums;
int ret = 0;
int async_submit = 0;
- int write = rw & REQ_WRITE;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
@@ -6170,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
- csums, async_submit);
+ async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
goto out_err;
}
- /* Write's use the ordered csums */
- if (!write && !skip_sum)
- csums = csums + nr_pages;
start_sector += submit_len >> 9;
file_offset += submit_len;
@@ -6209,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
- csums, async_submit);
+ async_submit);
if (!ret)
return 0;
@@ -6245,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
ret = -ENOMEM;
goto free_ordered;
}
- dip->csums = NULL;
-
- /* Write's use the ordered csum stuff, so we don't need dip->csums */
- if (!write && !skip_sum) {
- dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
- if (!dip->csums) {
- kfree(dip);
- ret = -ENOMEM;
- goto free_ordered;
- }
- }
dip->private = bio->bi_private;
dip->inode = inode;
@@ -6340,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
out:
return retval;
}
+
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct btrfs_ordered_extent *ordered;
- struct extent_state *cached_state = NULL;
- u64 lockstart, lockend;
- ssize_t ret;
- int writing = rw & WRITE;
- int write_bits = 0;
- size_t count = iov_length(iov, nr_segs);
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
- offset, nr_segs)) {
+ offset, nr_segs))
return 0;
- }
-
- lockstart = offset;
- lockend = offset + count - 1;
-
- if (writing) {
- ret = btrfs_delalloc_reserve_space(inode, count);
- if (ret)
- goto out;
- }
-
- while (1) {
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- 0, &cached_state);
- /*
- * We're concerned with the entire range that we're going to be
- * doing DIO to, so we need to make sure theres no ordered
- * extents in this range.
- */
- ordered = btrfs_lookup_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
-
- /*
- * We need to make sure there are no buffered pages in this
- * range either, we could have raced between the invalidate in
- * generic_file_direct_write and locking the extent. The
- * invalidate needs to happen so that reads after a write do not
- * get stale data.
- */
- if (!ordered && (!writing ||
- !test_range_bit(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, EXTENT_UPTODATE, 0,
- cached_state)))
- break;
-
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
-
- if (ordered) {
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- } else {
- /* Screw you mmap */
- ret = filemap_write_and_wait_range(file->f_mapping,
- lockstart,
- lockend);
- if (ret)
- goto out;
-
- /*
- * If we found a page that couldn't be invalidated just
- * fall back to buffered.
- */
- ret = invalidate_inode_pages2_range(file->f_mapping,
- lockstart >> PAGE_CACHE_SHIFT,
- lockend >> PAGE_CACHE_SHIFT);
- if (ret) {
- if (ret == -EBUSY)
- ret = 0;
- goto out;
- }
- }
-
- cond_resched();
- }
- /*
- * we don't use btrfs_set_extent_delalloc because we don't want
- * the dirty or uptodate bits
- */
- if (writing) {
- write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
- ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- EXTENT_DELALLOC, NULL, &cached_state,
- GFP_NOFS);
- if (ret) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_LOCKED | write_bits,
- 1, 0, &cached_state, GFP_NOFS);
- goto out;
- }
- }
-
- free_extent_state(cached_state);
- cached_state = NULL;
-
- ret = __blockdev_direct_IO(rw, iocb, inode,
+ return __blockdev_direct_IO(rw, iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, 0);
-
- if (ret < 0 && ret != -EIOCBQUEUED) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
- offset + iov_length(iov, nr_segs) - 1,
- EXTENT_LOCKED | write_bits, 1, 0,
- &cached_state, GFP_NOFS);
- } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
- /*
- * We're falling back to buffered, unlock the section we didn't
- * do IO on.
- */
- clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
- offset + iov_length(iov, nr_segs) - 1,
- EXTENT_LOCKED | write_bits, 1, 0,
- &cached_state, GFP_NOFS);
- }
-out:
- free_extent_state(cached_state);
- return ret;
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index bc2f6ffff3c..9df50fa8a07 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root,
uuid_le_gen(&new_uuid);
memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
- root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+ root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
root_item.ctime = root_item.otime;
btrfs_set_root_ctransid(&root_item, trans->transid);
btrfs_set_root_otransid(&root_item, trans->transid);
@@ -664,10 +664,6 @@ static noinline int btrfs_mksubvol(struct path *parent,
struct dentry *dentry;
int error;
- error = mnt_want_write(parent->mnt);
- if (error)
- return error;
-
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, parent->dentry, namelen);
@@ -703,7 +699,6 @@ out_dput:
dput(dentry);
out_unlock:
mutex_unlock(&dir->i_mutex);
- mnt_drop_write(parent->mnt);
return error;
}
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index a44eff07480..2a1762c6604 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
if (eb->lock_nested) {
read_lock(&eb->lock);
- if (&eb->lock_nested && current->pid == eb->lock_owner) {
+ if (eb->lock_nested && current->pid == eb->lock_owner) {
read_unlock(&eb->lock);
return;
}
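The locking.c fix is a one-character bug worth remembering: &eb->lock_nested
is the address of the field, which is never zero, so the old test was always
true; the intent was to test the field's value. A runnable demonstration of
the pitfall:

    #include <stdio.h>

    struct eb { int lock_nested; };

    int main(void)
    {
            struct eb e = { .lock_nested = 0 };

            if (&e.lock_nested)     /* address-of: always true */
                    printf("buggy test fires even though the flag is 0\n");
            if (e.lock_nested)      /* value test: what was intended */
                    printf("this line is never reached\n");
            return 0;
    }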
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 643335a4fe3..051c7fe551d 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
- * for pdflush to find them
+ * for the flusher thread to find them
*/
if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
filemap_fdatawrite_range(inode->i_mapping, start, end);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index bc424ae5a81..38b42e7bc91 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1364,13 +1364,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
spin_lock(&fs_info->qgroup_lock);
dstgroup = add_qgroup_rb(fs_info, objectid);
- if (!dstgroup)
+ if (IS_ERR(dstgroup)) {
+ ret = PTR_ERR(dstgroup);
goto unlock;
+ }
if (srcid) {
srcgroup = find_qgroup_rb(fs_info, srcid);
- if (!srcgroup)
+ if (!srcgroup) {
+ ret = -EINVAL;
goto unlock;
+ }
dstgroup->rfer = srcgroup->rfer - level_size;
dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
srcgroup->excl = level_size;
@@ -1379,8 +1383,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
qgroup_dirty(fs_info, srcgroup);
}
- if (!inherit)
+ if (!inherit) {
+ ret = -EINVAL;
goto unlock;
+ }
i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) {
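The qgroup.c checks change because add_qgroup_rb() reports failure through an
error-encoded pointer, not NULL, so the old `if (!dstgroup)` could never
fire; the hunks also make each early `goto unlock` carry a real errno. A
minimal userspace re-implementation of the kernel's ERR_PTR convention shows
why the NULL test misses:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *p = ERR_PTR(-ENOMEM);    /* failure, but p != NULL */

            printf("NULL test: %d  IS_ERR: %d  err: %ld\n",
                   p == NULL, IS_ERR(p), PTR_ERR(p));
            return 0;
    }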
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6bb465cca20..10d8e4d8807 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
struct timespec ct = CURRENT_TIME;
spin_lock(&root->root_times_lock);
- item->ctransid = trans->transid;
+ item->ctransid = cpu_to_le64(trans->transid);
item->ctime.sec = cpu_to_le64(ct.tv_sec);
- item->ctime.nsec = cpu_to_le64(ct.tv_nsec);
+ item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
spin_unlock(&root->root_times_lock);
}
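The root-tree.c fix is about on-disk byte order: ctransid was stored in CPU
order, and nsec went through the 64-bit converter even though the on-disk
field is 32 bits wide, which misplaces bytes on big-endian hosts. A runnable
sketch of field-sized conversions, using the glibc endian helpers as
stand-ins for cpu_to_le64/cpu_to_le32:

    #include <endian.h>
    #include <stdint.h>
    #include <time.h>

    /* Illustrative on-disk layout: fixed-width little-endian fields. */
    struct disk_timespec {
            uint64_t sec;   /* le64 on disk */
            uint32_t nsec;  /* le32 on disk: needs the 32-bit converter */
    } __attribute__((packed));

    static void store_time(struct disk_timespec *d, const struct timespec *ts)
    {
            d->sec  = htole64((uint64_t)ts->tv_sec);
            d->nsec = htole32((uint32_t)ts->tv_nsec);
    }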
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8c6e61d6eed..83d6f9f9c22 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -100,10 +100,6 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
}
-/* NOTE:
- * We move write_super stuff at umount in order to avoid deadlock
- * for umount hold all lock.
- */
static void save_error_info(struct btrfs_fs_info *fs_info)
{
__save_error_info(fs_info);
@@ -842,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
- int ret;
trace_btrfs_sync_fs(wait);
@@ -853,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
btrfs_wait_ordered_extents(root, 0, 0);
- trans = btrfs_start_transaction(root, 0);
+ spin_lock(&fs_info->trans_lock);
+ if (!fs_info->running_transaction) {
+ spin_unlock(&fs_info->trans_lock);
+ return 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans, root);
- return ret;
+ return btrfs_commit_transaction(trans, root);
}
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1534,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
while (cur_devices) {
head = &cur_devices->devices;
list_for_each_entry(dev, head, dev_list) {
+ if (dev->missing)
+ continue;
if (!first_dev || dev->devid < first_dev->devid)
first_dev = dev;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 17be3dedacb..27c26004e05 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(parent_inode, parent_inode->i_size +
dentry->d_name.len * 2);
+ parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, parent_root, parent_inode);
if (ret)
goto abort_trans_dput;
@@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
memcpy(new_root_item->parent_uuid, root->root_item.uuid,
BTRFS_UUID_SIZE);
new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
- new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+ new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
btrfs_set_root_otransid(new_root_item, trans->transid);
memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b8708f994e6..88b969aeeb7 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -227,9 +227,8 @@ loop_lock:
cur = pending;
pending = pending->bi_next;
cur->bi_next = NULL;
- atomic_dec(&fs_info->nr_async_bios);
- if (atomic_read(&fs_info->nr_async_bios) < limit &&
+ if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
@@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
memcpy(new_device, device, sizeof(*new_device));
/* Safe because we are under uuid_mutex */
- name = rcu_string_strdup(device->name->str, GFP_NOFS);
- BUG_ON(device->name && !name); /* -ENOMEM */
- rcu_assign_pointer(new_device->name, name);
+ if (device->name) {
+ name = rcu_string_strdup(device->name->str, GFP_NOFS);
+ BUG_ON(device->name && !name); /* -ENOMEM */
+ rcu_assign_pointer(new_device->name, name);
+ }
new_device->bdev = NULL;
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
@@ -1744,10 +1745,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->fs_devices = root->fs_info->fs_devices;
- /*
- * we don't want write_supers to jump in here with our device
- * half setup
- */
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
@@ -4609,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root)
return ret;
}
-struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
- u64 logical, int mirror_num)
-{
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
- int ret;
- u64 map_length = 0;
- struct btrfs_bio *bbio = NULL;
- struct btrfs_device *device;
-
- BUG_ON(mirror_num == 0);
- ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
- mirror_num);
- if (ret) {
- BUG_ON(bbio != NULL);
- return NULL;
- }
- BUG_ON(mirror_num != bbio->mirror_num);
- device = bbio->stripes[mirror_num - 1].dev;
- kfree(bbio);
- return device;
-}
-
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
struct btrfs_path *path;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5479325987b..53c06af92e8 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
-struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
- u64 logical, int mirror_num);
void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_root *root,
diff --git a/fs/buffer.c b/fs/buffer.c
index 9f6d2e41281..58e2e7b7737 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
/*
* Initialise the state of a blockdev page's buffers.
*/
-static void
+static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
sector_t block, int size)
{
@@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
block++;
bh = bh->b_this_page;
} while (bh != head);
+
+ /*
+ * Caller needs to validate requested block against end of device.
+ */
+ return end_block;
}
/*
* Create the page-cache page that contains the requested block.
*
- * This is user purely for blockdev mappings.
+ * This is used purely for blockdev mappings.
*/
-static struct page *
+static int
grow_dev_page(struct block_device *bdev, sector_t block,
- pgoff_t index, int size)
+ pgoff_t index, int size, int sizebits)
{
struct inode *inode = bdev->bd_inode;
struct page *page;
struct buffer_head *bh;
+ sector_t end_block;
+ int ret = 0; /* Will call free_more_memory() */
page = find_or_create_page(inode->i_mapping, index,
(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
if (!page)
- return NULL;
+ return ret;
BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) {
bh = page_buffers(page);
if (bh->b_size == size) {
- init_page_buffers(page, bdev, block, size);
- return page;
+ end_block = init_page_buffers(page, bdev,
+ index << sizebits, size);
+ goto done;
}
if (!try_to_free_buffers(page))
goto failed;
@@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
*/
spin_lock(&inode->i_mapping->private_lock);
link_dev_buffers(page, bh);
- init_page_buffers(page, bdev, block, size);
+ end_block = init_page_buffers(page, bdev, index << sizebits, size);
spin_unlock(&inode->i_mapping->private_lock);
- return page;
-
+done:
+ ret = (block < end_block) ? 1 : -ENXIO;
failed:
unlock_page(page);
page_cache_release(page);
- return NULL;
+ return ret;
}
/*
@@ -999,7 +1007,6 @@ failed:
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
- struct page *page;
pgoff_t index;
int sizebits;
@@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
bdevname(bdev, b));
return -EIO;
}
- block = index << sizebits;
+
/* Create a page with the proper size buffers.. */
- page = grow_dev_page(bdev, block, index, size);
- if (!page)
- return 0;
- unlock_page(page);
- page_cache_release(page);
- return 1;
+ return grow_dev_page(bdev, block, index, size, sizebits);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
- int ret;
- struct buffer_head *bh;
-
/* Size must be multiple of hard sectorsize */
if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
(size < 512 || size > PAGE_SIZE))) {
@@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
return NULL;
}
-retry:
- bh = __find_get_block(bdev, block, size);
- if (bh)
- return bh;
+ for (;;) {
+ struct buffer_head *bh;
+ int ret;
- ret = grow_buffers(bdev, block, size);
- if (ret == 0) {
- free_more_memory();
- goto retry;
- } else if (ret > 0) {
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
+
+ ret = grow_buffers(bdev, block, size);
+ if (ret < 0)
+ return NULL;
+ if (ret == 0)
+ free_more_memory();
}
- return NULL;
}
/*
@@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);
* which corresponds to the passed block_device, block and size. The
* returned buffer has its reference count incremented.
*
- * __getblk() cannot fail - it just keeps trying. If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block. Very weird.
- *
* __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
* attempt is failing. FIXME, perhaps?
*/
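
The __getblk_slow() rework above replaces the goto-based retry with a
tri-state loop: grow_buffers() now returns a negative errno when the
requested block lies beyond the end of the device (give up), zero when
memory was short (reclaim and retry), and a positive value on success.
A minimal userspace sketch of that control flow, with hypothetical
stand-ins for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>

static int attempts;

static void *find_block(void)		/* stands in for __find_get_block() */
{
	return attempts >= 2 ? "bh" : NULL;
}

static int grow(void)			/* stands in for grow_buffers() */
{
	return attempts++ ? 1 : 0;	/* first call: "out of memory" */
}

static void reclaim(void)		/* stands in for free_more_memory() */
{
	puts("reclaiming");
}

static void *getblk_slow(void)
{
	for (;;) {
		void *bh = find_block();
		int ret;

		if (bh)
			return bh;
		ret = grow();
		if (ret < 0)
			return NULL;	/* block past end of device */
		if (ret == 0)
			reclaim();	/* free memory, then retry */
	}
}

int main(void)
{
	printf("got %s\n", (char *)getblk_slow());
	return 0;
}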
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index fb962efdace..6d59006bfa2 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
int err = -ENOMEM;
dout("ceph_fs_debugfs_init\n");
+ BUG_ON(!fsc->client->debugfs_dir);
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9fff9f3b17e..4b5762ef7c2 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
if (rinfo->head->is_dentry) {
struct inode *dir = req->r_locked_dir;
- err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
- session, req->r_request_started, -1,
- &req->r_caps_reservation);
- if (err < 0)
- return err;
+ if (dir) {
+ err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+ session, req->r_request_started, -1,
+ &req->r_caps_reservation);
+ if (err < 0)
+ return err;
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
/*
@@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
* will have trouble splicing in the virtual snapdir later
*/
if (rinfo->head->is_dentry && !req->r_aborted &&
+ req->r_locked_dir &&
(rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
fsc->mount_options->snapdir_name,
req->r_dentry->d_name.len))) {
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8e3fb69fbe6..1396ceb4679 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
/* validate striping parameters */
if ((l->object_size & ~PAGE_MASK) ||
(l->stripe_unit & ~PAGE_MASK) ||
- ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+ (l->stripe_unit != 0 &&
+ ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
return -EINVAL;
/* make sure it's a valid data pool */
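
The __validate_layout() change guards the modulo with stripe_unit != 0,
so a crafted ioctl argument can no longer divide by zero in the kernel.
A small sketch of the check-the-divisor-first pattern (validate() is
illustrative, not the kernel function, and the page-alignment tests are
omitted):

#include <stdio.h>

static int validate(unsigned object_size, unsigned stripe_unit)
{
	/* test the divisor before using it, exactly as the fix does */
	if (stripe_unit != 0 && (object_size % stripe_unit))
		return -1;		/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d\n", validate(4096, 0), validate(4096, 1000));
	return 0;
}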
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 074923ce593..f0cf934ba87 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
/* result already set, check signature */
if (server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
- server, mid->sequence_number + 1))
- cERROR(1, "Unexpected SMB signature");
+ int rc = 0;
+
+ rc = cifs_verify_signature(rdata->iov, rdata->nr_iov,
+ server,
+ mid->sequence_number + 1);
+ if (rc)
+ cERROR(1, "SMB signature verification returned "
+ "error = %d", rc);
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->bytes);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index cbe709ad666..781025be48b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -356,19 +356,12 @@ cifs_create_get_file_info:
cifs_create_set_dentry:
if (rc != 0) {
cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
+ CIFSSMBClose(xid, tcon, *fileHandle);
goto out;
}
d_drop(direntry);
d_add(direntry, newinode);
- /* ENOENT for create? How weird... */
- rc = -ENOENT;
- if (!newinode) {
- CIFSSMBClose(xid, tcon, *fileHandle);
- goto out;
- }
- rc = 0;
-
out:
kfree(buf);
kfree(full_path);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9154192b068..71e9ad9f596 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -917,7 +917,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
if (!buf) {
mutex_unlock(&cinode->lock_mutex);
free_xid(xid);
- return rc;
+ return -ENOMEM;
}
for (i = 0; i < 2; i++) {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7354877fa3b..cb79c7edecb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- unsigned long oldtime = cifs_i->time;
cifs_revalidate_cache(inode, fattr);
+ spin_lock(&inode->i_lock);
inode->i_atime = fattr->cf_atime;
inode->i_mtime = fattr->cf_mtime;
inode->i_ctime = fattr->cf_ctime;
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
else
cifs_i->time = jiffies;
- cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
- oldtime, cifs_i->time);
-
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
cifs_i->server_eof = fattr->cf_eof;
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
* Can't safely change the file size here if the client is writing to
* it due to potential races.
*/
- spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
i_size_write(inode, fattr->cf_eof);
@@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb)
if (rc && tcon->ipc) {
cFYI(1, "ipc connection - fake read inode");
+ spin_lock(&inode->i_lock);
inode->i_mode |= S_IFDIR;
set_nlink(inode, 2);
inode->i_op = &cifs_ipc_inode_ops;
inode->i_fop = &simple_dir_operations;
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
+ spin_unlock(&inode->i_lock);
} else if (rc) {
iget_failed(inode);
inode = ERR_PTR(rc);
@@ -1110,6 +1108,15 @@ undo_setattr:
goto out_close;
}
+/* copied from fs/nfs/dir.c with small changes */
+static void
+cifs_drop_nlink(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&inode->i_lock);
+}
/*
* If dentry->d_inode is null (usually meaning the cached dentry
@@ -1166,13 +1173,13 @@ retry_std_delete:
psx_del_no_retry:
if (!rc) {
if (inode)
- drop_nlink(inode);
+ cifs_drop_nlink(inode);
} else if (rc == -ENOENT) {
d_drop(dentry);
} else if (rc == -ETXTBSY) {
rc = cifs_rename_pending_delete(full_path, dentry, xid);
if (rc == 0)
- drop_nlink(inode);
+ cifs_drop_nlink(inode);
} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
if (attrs == NULL) {
@@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode,
* setting nlink not necessary except in cases where we failed to get it
* from the server or was set bogus
*/
+ spin_lock(&dentry->d_inode->i_lock);
if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2))
set_nlink(dentry->d_inode, 2);
-
+ spin_unlock(&dentry->d_inode->i_lock);
mode &= ~current_umask();
/* must turn on setgid bit if parent dir has it */
if (inode->i_mode & S_ISGID)
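
cifs_drop_nlink() above takes i_lock and refuses to decrement a link
count that is already zero, since a stale cached nlink could otherwise
underflow. A runnable model of the same guarded decrement, with a
pthread mutex standing in for the inode spinlock and all names
hypothetical:

#include <pthread.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t lock;
	unsigned int nlink;
};

static void drop_nlink_safe(struct fake_inode *inode)
{
	pthread_mutex_lock(&inode->lock);
	if (inode->nlink > 0)		/* never wrap below zero */
		inode->nlink--;
	pthread_mutex_unlock(&inode->lock);
}

int main(void)
{
	struct fake_inode i = { PTHREAD_MUTEX_INITIALIZER, 1 };

	drop_nlink_safe(&i);
	drop_nlink_safe(&i);		/* second call is a no-op, not -1 */
	printf("nlink=%u\n", i.nlink);
	return 0;
}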
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 09e4b3ae456..e6ce3b11287 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
if (old_file->d_inode) {
cifsInode = CIFS_I(old_file->d_inode);
if (rc == 0) {
+ spin_lock(&old_file->d_inode->i_lock);
inc_nlink(old_file->d_inode);
+ spin_unlock(&old_file->d_inode->i_lock);
/* BB should we make this contingent on superblock flag NOATIME? */
/* old_file->d_inode->i_ctime = CURRENT_TIME;*/
/* parent dir timestamps will update from srv
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index a4ff5d54755..e4d3b996416 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
cERROR(1, "Bad protocol string signature header %x",
*(unsigned int *) hdr->ProtocolId);
if (mid != hdr->MessageId)
- cERROR(1, "Mids do not match");
+ cERROR(1, "Mids do not match: %llu and %llu", mid,
+ hdr->MessageId);
}
cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId);
return 1;
@@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length)
* ie Validate the wct via smb2_struct_sizes table above
*/
- if (length < 2 + sizeof(struct smb2_hdr)) {
+ if (length < sizeof(struct smb2_pdu)) {
if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
pdu->StructureSize2 = 0;
/*
@@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length)
return 1;
}
if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) {
- cERROR(1, "SMB length greater than maximum, mid=%lld", mid);
+ cERROR(1, "SMB length greater than maximum, mid=%llu", mid);
return 1;
}
if (check_smb2_hdr(hdr, mid))
return 1;
- if (hdr->StructureSize != SMB2_HEADER_SIZE) {
- cERROR(1, "Illegal structure size %d",
+ if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+ cERROR(1, "Illegal structure size %u",
le16_to_cpu(hdr->StructureSize));
return 1;
}
@@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length)
if (4 + len != clc_len) {
cFYI(1, "Calculated size %u length %u mismatch mid %llu",
clc_len, 4 + len, mid);
- if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */
- return 0; /* BB workaround Samba 3 bug SessSetup rsp */
+ /* server can return one byte more */
+ if (clc_len == 4 + len + 1)
+ return 0;
return 1;
}
return 0;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f37a1b41b40..15dc8eea827 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -87,10 +87,6 @@
#define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe)
-#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64)
-
-#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
-
/*
* SMB2 Header Definition
*
@@ -99,6 +95,9 @@
* "PDU" : "Protocol Data Unit" (ie a network "frame")
*
*/
+
+#define SMB2_HEADER_STRUCTURE_SIZE __constant_cpu_to_le16(64)
+
struct smb2_hdr {
__be32 smb2_buf_length; /* big endian on wire */
/* length is only two or three bytes - with
@@ -140,6 +139,9 @@ struct smb2_pdu {
* command code name for the struct. Note that structures must be packed.
*
*/
+
+#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_cpu_to_le16(9)
+
struct smb2_err_rsp {
struct smb2_hdr hdr;
__le16 StructureSize;
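
Note the direction flip in the moved constants: StructureSize is stored
little-endian on the wire, so the value it is compared against must be
cpu_to_le16(64); the removed __constant_le16_to_cpu(64) byte-swapped
the constant on big-endian hosts, where the comparison then failed. A
userspace sketch of why encoding the constant the same way as the wire
field keeps the comparison host-independent (this cpu_to_le16() is a
local illustration, not the kernel macro):

#include <stdint.h>
#include <stdio.h>

static uint16_t cpu_to_le16(uint16_t v)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { 1 };

	/* identity on little-endian hosts, byte swap on big-endian */
	return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t wire = cpu_to_le16(64);	/* as read off the packet */

	printf("match=%d\n", wire == cpu_to_le16(64));	/* 1 on any host */
	return 0;
}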
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 83867ef348d..d9b639b95fa 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
/* convert the length into a more usable form */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
struct kvec iov;
+ int rc = 0;
iov.iov_base = mid->resp_buf;
iov.iov_len = len;
/* FIXME: add code to kill session */
- if (cifs_verify_signature(&iov, 1, server,
- mid->sequence_number + 1) != 0)
- cERROR(1, "Unexpected SMB signature");
+ rc = cifs_verify_signature(&iov, 1, server,
+ mid->sequence_number + 1);
+ if (rc)
+ cERROR(1, "SMB signature verification returned error = "
+ "%d", rc);
}
/* BB special case reconnect tid and uid here? */
diff --git a/fs/compat.c b/fs/compat.c
index 6161255fac4..1bdb350ea5d 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1155,11 +1155,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
struct file *file;
int fput_needed;
ssize_t ret;
+ loff_t pos;
file = fget_light(fd, &fput_needed);
if (!file)
return -EBADF;
- ret = compat_readv(file, vec, vlen, &file->f_pos);
+ pos = file->f_pos;
+ ret = compat_readv(file, vec, vlen, &pos);
+ file->f_pos = pos;
fput_light(file, fput_needed);
return ret;
}
@@ -1221,11 +1224,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
struct file *file;
int fput_needed;
ssize_t ret;
+ loff_t pos;
file = fget_light(fd, &fput_needed);
if (!file)
return -EBADF;
- ret = compat_writev(file, vec, vlen, &file->f_pos);
+ pos = file->f_pos;
+ ret = compat_writev(file, vec, vlen, &pos);
+ file->f_pos = pos;
fput_light(file, fput_needed);
return ret;
}
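
Both compat readv and writev now operate on a stack copy of f_pos and
store it back once, so a concurrent reader of the struct file never
observes a position the I/O helper is still advancing. A userspace
model of the snapshot-then-publish pattern (all names illustrative):

#include <stdio.h>

struct fake_file { long long f_pos; };

static long do_readv(long long *ppos, long len)
{
	*ppos += len;		/* helper advances the position it is given */
	return len;
}

static long sys_readv(struct fake_file *file, long len)
{
	long long pos = file->f_pos;	/* snapshot */
	long ret = do_readv(&pos, len);

	file->f_pos = pos;		/* single store back */
	return ret;
}

int main(void)
{
	struct fake_file f = { 0 };

	sys_readv(&f, 100);
	printf("pos=%lld\n", f.f_pos);
	return 0;
}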
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1faf4cb56f3..f86c720dba0 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
unsigned long user_addr;
size_t bytes;
struct buffer_head map_bh = { 0, };
+ struct blk_plug plug;
if (rw & WRITE)
rw = WRITE_ODIRECT;
@@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
PAGE_SIZE - user_addr / PAGE_SIZE);
}
+ blk_start_plug(&plug);
+
for (seg = 0; seg < nr_segs; seg++) {
user_addr = (unsigned long)iov[seg].iov_base;
sdio.size += bytes = iov[seg].iov_len;
@@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (sdio.bio)
dio_bio_submit(dio, &sdio);
+ blk_finish_plug(&plug);
+
/*
* It is possible that, we return short IO due to end of file.
* In that case, we need to release all the pages we got hold on.
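
The direct-io hunks bracket the per-segment submission loop with
blk_start_plug()/blk_finish_plug(), so the block layer can merge and
dispatch the queued bios as one batch when the plug is released. A
loose userspace analogy of plugging, using hypothetical start_plug()/
submit()/finish_plug() helpers rather than the real block-layer API:

#include <stdio.h>

#define MAX_BATCH 16

struct plug {
	int pending[MAX_BATCH];
	int n;
};

static void start_plug(struct plug *p)
{
	p->n = 0;			/* hold submissions from here on */
}

static void submit(struct plug *p, int bio)
{
	p->pending[p->n++] = bio;	/* queued, not yet issued */
}

static void finish_plug(struct plug *p)
{
	int i;

	for (i = 0; i < p->n; i++)	/* one burst to the "device" */
		printf("issue bio %d\n", p->pending[i]);
	p->n = 0;
}

int main(void)
{
	struct plug plug;
	int seg;

	start_plug(&plug);
	for (seg = 0; seg < 3; seg++)
		submit(&plug, seg);
	finish_plug(&plug);
	return 0;
}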
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 44ce5c6a541..d45ba456812 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -275,8 +275,14 @@ out:
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
- return file->f_mode & FMODE_WRITE
- ? filemap_write_and_wait(file->f_mapping) : 0;
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+
+ if (lower_file->f_op && lower_file->f_op->flush) {
+ filemap_write_and_wait(file->f_mapping);
+ return lower_file->f_op->flush(lower_file, td);
+ }
+
+ return 0;
}
static int ecryptfs_release(struct inode *inode, struct file *file)
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 534b129ea67..cc7709e7c50 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -619,6 +619,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
struct dentry *trap = NULL;
+ struct inode *target_inode;
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -626,6 +627,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
+ target_inode = new_dentry->d_inode;
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
/* source should not be ancestor of target */
if (trap == lower_old_dentry) {
@@ -641,6 +643,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
goto out_lock;
+ if (target_inode)
+ fsstack_copy_attr_all(target_inode,
+ ecryptfs_inode_to_lower(target_inode));
fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 2768138eefe..9b627c15010 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode)
inode_info = ecryptfs_inode_to_private(inode);
if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
&inode_info->lower_file_mutex)) {
+ filemap_write_and_wait(inode->i_mapping);
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
mutex_unlock(&inode_info->lower_file_mutex);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1c8b5567080..eedec84c180 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
error = PTR_ERR(file);
goto out_free_fd;
}
- fd_install(fd, file);
ep->file = file;
+ fd_install(fd, file);
return fd;
out_free_fd:
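
The epoll_create1() hunk is purely an ordering fix: fd_install()
publishes the file in the descriptor table, after which another thread
can reach it through the fd, so ep->file must be assigned before that
point. A tiny sketch of initialise-fully-then-publish (publish() stands
in for fd_install()):

#include <assert.h>
#include <stdlib.h>

struct ep { void *file; };

static struct ep *published;		/* stands in for the fd table */

static void publish(struct ep *e)	/* stands in for fd_install() */
{
	published = e;			/* visible to others from here on */
}

int main(void)
{
	struct ep *e = calloc(1, sizeof(*e));

	e->file = e;			/* initialise every field first... */
	publish(e);			/* ...then make it reachable */
	assert(published->file != NULL);
	free(e);
	return 0;
}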
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 5badb0c039d..1562c27a2fa 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -37,15 +37,12 @@
#define EXOFS_DBGMSG2(M...) do {} while (0)
-enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
-
unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
- unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+ unsigned pages = min_t(unsigned, expected_pages,
+ layout->max_io_length / PAGE_SIZE);
- /* TODO: easily support bio chaining */
- pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
return pages;
}
@@ -101,7 +98,8 @@ static void _pcol_reset(struct page_collect *pcol)
* it might not end here. don't be left with nothing
*/
if (!pcol->expected_pages)
- pcol->expected_pages = MAX_PAGES_KMALLOC;
+ pcol->expected_pages =
+ exofs_max_io_pages(&pcol->sbi->layout, ~0);
}
static int pcol_try_alloc(struct page_collect *pcol)
@@ -389,6 +387,8 @@ static int readpage_strip(void *data, struct page *page)
size_t len;
int ret;
+ BUG_ON(!PageLocked(page));
+
/* FIXME: Just for debugging, will be removed */
if (PageUptodate(page))
EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@ -572,8 +572,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
if (!pcol->that_locked_page ||
(pcol->that_locked_page->index != index)) {
- struct page *page = find_get_page(pcol->inode->i_mapping, index);
+ struct page *page;
+ loff_t i_size = i_size_read(pcol->inode);
+
+ if (offset >= i_size) {
+ *uptodate = true;
+ EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
+ return ZERO_PAGE(0);
+ }
+ page = find_get_page(pcol->inode->i_mapping, index);
if (!page) {
page = find_or_create_page(pcol->inode->i_mapping,
index, GFP_NOFS);
@@ -602,12 +610,13 @@ static void __r4w_put_page(void *priv, struct page *page)
{
struct page_collect *pcol = priv;
- if (pcol->that_locked_page != page) {
+ if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
EXOFS_DBGMSG("index=0x%lx\n", page->index);
page_cache_release(page);
return;
}
- EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
+ EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
+ ZERO_PAGE(0) == page ? -1 : page->index);
}
static const struct _ore_r4w_op _r4w_op = {
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 24a49d47e93..1585db1aa36 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
- bio, per_dev->length);
+ osd_req_write(or, _ios_obj(ios, cur_comp),
+ per_dev->offset, bio, per_dev->length);
ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
(ios->si.unit_off + ios->length >
ios->layout->stripe_unit));
- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
+ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
per_dev->offset,
ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(ios->length), per_dev->dev);
} else {
- osd_req_set_attributes(or, _ios_obj(ios, dev));
+ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
ios->out_attr_len, dev);
}
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 433783624d1..dde41a75c7c 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -400,8 +400,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
ret = ore_write(ios);
if (unlikely(ret))
EXOFS_ERR("%s: ore_write failed.\n", __func__);
- else
- sb->s_dirt = 0;
unlock_super(sb);
@@ -412,14 +410,6 @@ out:
return ret;
}
-static void exofs_write_super(struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- exofs_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
-}
-
static void _exofs_print_device(const char *msg, const char *dev_path,
struct osd_dev *od, u64 pid)
{
@@ -952,7 +942,6 @@ static const struct super_operations exofs_sops = {
.write_inode = exofs_write_inode,
.evict_inode = exofs_evict_inode,
.put_super = exofs_put_super,
- .write_super = exofs_write_super,
.sync_fs = exofs_sync_fs,
.statfs = exofs_statfs,
};
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9a4a5c48b1c..ff574b4e345 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3072,6 +3072,8 @@ static int ext3_do_update_inode(handle_t *handle,
struct ext3_inode_info *ei = EXT3_I(inode);
struct buffer_head *bh = iloc->bh;
int err = 0, rc, block;
+ int need_datasync = 0;
+ __le32 disksize;
uid_t i_uid;
gid_t i_gid;
@@ -3113,7 +3115,11 @@ again:
raw_inode->i_gid_high = 0;
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
- raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+ disksize = cpu_to_le32(ei->i_disksize);
+ if (disksize != raw_inode->i_size) {
+ need_datasync = 1;
+ raw_inode->i_size = disksize;
+ }
raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
@@ -3129,8 +3135,11 @@ again:
if (!S_ISREG(inode->i_mode)) {
raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
} else {
- raw_inode->i_size_high =
- cpu_to_le32(ei->i_disksize >> 32);
+ disksize = cpu_to_le32(ei->i_disksize >> 32);
+ if (disksize != raw_inode->i_size_high) {
+ raw_inode->i_size_high = disksize;
+ need_datasync = 1;
+ }
if (ei->i_disksize > 0x7fffffffULL) {
struct super_block *sb = inode->i_sb;
if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
@@ -3183,6 +3192,8 @@ again:
ext3_clear_inode_state(inode, EXT3_STATE_NEW);
atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
+ if (need_datasync)
+ atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
out_brelse:
brelse (bh);
ext3_std_error(inode->i_sb, err);
@@ -3459,14 +3470,6 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
- *
- * Is this efficient/effective? Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O. But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out. One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory. It has the desired
- * effect.
*/
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index ff9bcdc5b0d..8c892e93d8e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -64,11 +64,6 @@ static int ext3_freeze(struct super_block *sb);
/*
* Wrappers for journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
*/
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -90,12 +85,6 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
return journal_start(journal, nblocks);
}
-/*
- * The only special thing we need to do here is to make sure that all
- * journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
int __ext3_journal_stop(const char *where, handle_t *handle)
{
struct super_block *sb;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index d23b31ca9d7..1b5089067d0 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -280,14 +280,18 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
-static int ext4_valid_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *desc,
- unsigned int block_group,
- struct buffer_head *bh)
+/*
+ * Return the block number which was discovered to be invalid, or 0 if
+ * the block bitmap is valid.
+ */
+static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
+ struct ext4_group_desc *desc,
+ unsigned int block_group,
+ struct buffer_head *bh)
{
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
- ext4_fsblk_t bitmap_blk;
+ ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
@@ -297,37 +301,33 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
* or it has to also read the block group where the bitmaps
* are located to verify they are set.
*/
- return 1;
+ return 0;
}
group_first_block = ext4_group_first_block_no(sb, block_group);
/* check whether block bitmap block number is set */
- bitmap_blk = ext4_block_bitmap(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_block_bitmap(sb, desc);
+ offset = blk - group_first_block;
if (!ext4_test_bit(offset, bh->b_data))
/* bad block bitmap */
- goto err_out;
+ return blk;
/* check whether the inode bitmap block number is set */
- bitmap_blk = ext4_inode_bitmap(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_inode_bitmap(sb, desc);
+ offset = blk - group_first_block;
if (!ext4_test_bit(offset, bh->b_data))
/* bad block bitmap */
- goto err_out;
+ return blk;
/* check whether the inode table block number is set */
- bitmap_blk = ext4_inode_table(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_inode_table(sb, desc);
+ offset = blk - group_first_block;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
offset + EXT4_SB(sb)->s_itb_per_group,
offset);
- if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
- /* good bitmap for inode tables */
- return 1;
-
-err_out:
- ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
- block_group, bitmap_blk);
+ if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
+ /* bad bitmap for inode tables */
+ return blk;
return 0;
}
@@ -336,14 +336,26 @@ void ext4_validate_block_bitmap(struct super_block *sb,
unsigned int block_group,
struct buffer_head *bh)
{
+ ext4_fsblk_t blk;
+
if (buffer_verified(bh))
return;
ext4_lock_group(sb, block_group);
- if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
- ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8))
- set_buffer_verified(bh);
+ blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
+ block_group, blk);
+ return;
+ }
+ if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+ desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+ return;
+ }
+ set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
}
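
ext4_valid_block_bitmap() now returns the first block that failed
validation (0 meaning the bitmap is valid) and leaves the reporting to
the caller, which drops the group lock before calling ext4_error().
A sketch of the validate-under-lock, report-after-unlock split, with
hypothetical names and the locking shown as comments:

#include <stdio.h>

static unsigned long long check_bitmap(void)	/* 0 means valid */
{
	return 12345ULL;		/* pretend block 12345 is bad */
}

static void report(const char *what, unsigned long long blk)
{
	/* the kernel calls ext4_error() here, with the group lock
	 * already released */
	printf("bg 0: block %llu: %s\n", blk, what);
}

int main(void)
{
	unsigned long long blk;

	/* ext4_lock_group(sb, group); */
	blk = check_bitmap();
	if (blk != 0) {
		/* ext4_unlock_group(sb, group); */
		report("invalid block bitmap", blk);
		return 1;
	}
	/* mark verified, then ext4_unlock_group(sb, group); */
	return 0;
}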
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index f8716eab999..5c2d1813ebe 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -79,7 +79,6 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
if (provided == calculated)
return 1;
- ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
return 0;
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index cd0c7ed0677..aabbb3f5368 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2662,6 +2662,7 @@ cont:
}
path[0].p_depth = depth;
path[0].p_hdr = ext_inode_hdr(inode);
+ i = 0;
if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
err = -EIO;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6324f74e034..dff171c3a12 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1970,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* This function can get called via...
* - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
- * - shrink_page_list via pdflush (no journal handle)
+ * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have page with
@@ -4589,14 +4589,6 @@ static int ext4_expand_extra_isize(struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
- *
- * Is this efficient/effective? Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O. But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out. One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory. It has the desired
- * effect.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d76ec8277d3..c6e0cb3d1f4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -326,11 +326,6 @@ static void ext4_put_nojournal(handle_t *handle)
/*
* Wrappers for jbd2_journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -356,12 +351,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
return jbd2_journal_start(journal, nblocks);
}
-/*
- * The only special thing we need to do here is to make sure that all
- * jbd2_journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
@@ -959,6 +948,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
+ ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -3119,6 +3109,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
int s, j, count = 0;
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+ return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+ sbi->s_itb_per_group + 2);
+
first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
(grp * EXT4_BLOCKS_PER_GROUP(sb));
last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
@@ -4430,6 +4424,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
ext4_commit_super(sb, 1);
jbd2_journal_clear_err(journal);
+ jbd2_journal_update_sb_errno(journal);
}
}
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 03ff5b1eba9..75a20c092dd 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -117,7 +117,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned val;
+ unsigned uninitialized_var(val);
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
@@ -154,7 +154,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned val;
+ unsigned uninitialized_var(val);
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 3426521f320..ee8d5504229 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -396,7 +396,7 @@ err_device:
err_region:
unregister_chrdev_region(devt, 1);
err:
- fc->conn_error = 1;
+ fuse_conn_kill(fc);
goto out;
}
@@ -532,8 +532,6 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
cdev_del(cc->cdev);
}
- /* kill connection and shutdown channel */
- fuse_conn_kill(&cc->fc);
rc = fuse_dev_release(inode, file); /* puts the base reference */
return rc;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 7df2b5e8fbe..f4246cfc8d8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1576,6 +1576,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->pages[req->num_pages] = page;
req->num_pages++;
+ offset = 0;
num -= this_num;
total_len += this_num;
index++;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8964cf3999b..324bc085053 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -383,6 +383,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_entry_out outentry;
struct fuse_file *ff;
+ /* Userspace expects S_IFREG in create mode */
+ BUG_ON((mode & S_IFMT) != S_IFREG);
+
forget = fuse_alloc_forget();
err = -ENOMEM;
if (!forget)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 93d8d6c9494..aba15f1b7ad 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -703,13 +703,16 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
- if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
+ /*
+ * In auto invalidate mode, always update attributes on read.
+ * Otherwise, only update if we attempt to read past EOF (to ensure
+ * i_size is up to date).
+ */
+ if (fc->auto_inval_data ||
+ (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
int err;
- /*
- * If trying to read past EOF, make sure the i_size
- * attribute is up-to-date.
- */
err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
if (err)
return err;
@@ -1700,7 +1703,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
size_t n;
u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
- for (n = 0; n < count; n++) {
+ for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
return -ENOMEM;
max -= iov->iov_len;
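
The fuse_verify_ioctl_iov() fix adds the missing iov++, so each pass
checks the next vector; the old loop validated iov[0] count times and
let oversized later vectors through. A minimal demonstration of the
corrected loop with an illustrative iovec type:

#include <stddef.h>
#include <stdio.h>

struct iovec_s { size_t iov_len; };

static int verify(const struct iovec_s *iov, size_t count, size_t max)
{
	size_t n;

	for (n = 0; n < count; n++, iov++) {	/* advance each pass */
		if (iov->iov_len > max)
			return -1;
		max -= iov->iov_len;
	}
	return 0;
}

int main(void)
{
	struct iovec_s v[2] = { { 10 }, { 1000 } };

	printf("%d\n", verify(v, 2, 100));	/* -1: second vec too big */
	return 0;
}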
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 771fb6322c0..e24dd74e306 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -484,6 +484,9 @@ struct fuse_conn {
/** Is fallocate not implemented by fs? */
unsigned no_fallocate:1;
+ /** Use enhanced/automatic page cache invalidation. */
+ unsigned auto_inval_data:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1cd61652018..fca222dabe3 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -197,6 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
loff_t oldsize;
+ struct timespec old_mtime;
spin_lock(&fc->lock);
if (attr_version != 0 && fi->attr_version > attr_version) {
@@ -204,15 +205,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
+ old_mtime = inode->i_mtime;
fuse_change_attributes_common(inode, attr, attr_valid);
oldsize = inode->i_size;
i_size_write(inode, attr->size);
spin_unlock(&fc->lock);
- if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
- truncate_pagecache(inode, oldsize, attr->size);
- invalidate_inode_pages2(inode->i_mapping);
+ if (S_ISREG(inode->i_mode)) {
+ bool inval = false;
+
+ if (oldsize != attr->size) {
+ truncate_pagecache(inode, oldsize, attr->size);
+ inval = true;
+ } else if (fc->auto_inval_data) {
+ struct timespec new_mtime = {
+ .tv_sec = attr->mtime,
+ .tv_nsec = attr->mtimensec,
+ };
+
+ /*
+ * Auto inval mode also checks and invalidates if mtime
+ * has changed.
+ */
+ if (!timespec_equal(&old_mtime, &new_mtime))
+ inval = true;
+ }
+
+ if (inval)
+ invalidate_inode_pages2(inode->i_mapping);
}
}
@@ -346,11 +367,6 @@ void fuse_conn_kill(struct fuse_conn *fc)
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
wake_up_all(&fc->reserved_req_waitq);
- mutex_lock(&fuse_mutex);
- list_del(&fc->entry);
- fuse_ctl_remove_conn(fc);
- mutex_unlock(&fuse_mutex);
- fuse_bdi_destroy(fc);
}
EXPORT_SYMBOL_GPL(fuse_conn_kill);
@@ -359,7 +375,14 @@ static void fuse_put_super(struct super_block *sb)
struct fuse_conn *fc = get_fuse_conn_super(sb);
fuse_send_destroy(fc);
+
fuse_conn_kill(fc);
+ mutex_lock(&fuse_mutex);
+ list_del(&fc->entry);
+ fuse_ctl_remove_conn(fc);
+ mutex_unlock(&fuse_mutex);
+ fuse_bdi_destroy(fc);
+
fuse_conn_put(fc);
}
@@ -834,6 +857,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->big_writes = 1;
if (arg->flags & FUSE_DONT_MASK)
fc->dont_mask = 1;
+ if (arg->flags & FUSE_AUTO_INVAL_DATA)
+ fc->auto_inval_data = 1;
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -859,7 +884,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
- FUSE_FLOCK_LOCKS;
+ FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+ FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
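
With FUSE_AUTO_INVAL_DATA negotiated, fuse_change_attributes() above
invalidates cached pages not only when the size changes but also when
the server-reported mtime moves. A userspace sketch of that decision,
with a local ts_equal() mirroring what timespec_equal() checks:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool ts_equal(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

static bool must_invalidate(long long oldsize, long long newsize,
			    bool auto_inval,
			    const struct timespec *old_mtime,
			    const struct timespec *new_mtime)
{
	if (oldsize != newsize)
		return true;		/* truncate + invalidate */
	if (auto_inval && !ts_equal(old_mtime, new_mtime))
		return true;		/* data changed behind our back */
	return false;
}

int main(void)
{
	struct timespec a = { 1, 0 }, b = { 1, 500 };

	printf("%d\n", must_invalidate(4096, 4096, true, &a, &b)); /* 1 */
	return 0;
}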
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3a56c8d94de..22255d96b27 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
+ * potentially cause a busy-wait loop from the flusher thread and kswapd
* activity, but those code paths have their own higher-level
* throttling.
*/
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 5fd51a5833f..b7ec224910c 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -236,10 +236,10 @@ out:
* hfs_mdb_commit()
*
* Description:
- * This updates the MDB on disk (look also at hfs_write_super()).
+ * This updates the MDB on disk.
* It does not check, if the superblock has been modified, or
* if the filesystem has been mounted read-only. It is mainly
- * called by hfs_write_super() and hfs_btree_extend().
+ * called by hfs_sync_fs() and flush_mdb().
* Input Variable(s):
* struct hfs_mdb *mdb: Pointer to the hfs MDB
* int backup;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 425c2f2cf17..a2862339323 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If ext3_write_super() recently started a commit, then we
- * have to wait for completion of that transaction
+ * If commit has been started, then we have to wait for
+ * completion of that transaction.
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
@@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal)
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
spin_lock(&journal->j_state_lock);
+ /* Is it already empty? */
+ if (sb->s_start == 0) {
+ spin_unlock(&journal->j_state_lock);
+ return;
+ }
jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
journal->j_tail_sequence);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9a3c4c8559..e149b99a7ff 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If ext3_write_super() recently started a commit, then we
- * have to wait for completion of that transaction
+ * If commit has been started, then we have to wait for
+ * completion of that transaction.
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
@@ -1377,7 +1377,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
* Update a journal's errno. Write updated superblock to disk waiting for IO
* to complete.
*/
-static void jbd2_journal_update_sb_errno(journal_t *journal)
+void jbd2_journal_update_sb_errno(journal_t *journal)
{
journal_superblock_t *sb = journal->j_superblock;
@@ -1390,6 +1390,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)
jbd2_write_superblock(journal, WRITE_SYNC);
}
+EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
/*
* Read the superblock for a given journal, performing initial
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index df0de27c273..e784a217b50 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
struct completion complete;
bio_init(&bio);
+ bio.bi_max_vecs = 1;
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
bio_vec.bv_len = PAGE_SIZE;
@@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
struct address_space *mapping = super->s_mapping_inode->i_mapping;
struct bio *bio;
struct page *page;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+ unsigned int max_pages;
int i;
- if (max_pages > BIO_MAX_PAGES)
- max_pages = BIO_MAX_PAGES;
+ max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+
bio = bio_alloc(GFP_NOFS, max_pages);
BUG_ON(!bio);
@@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
{
struct logfs_super *super = logfs_super(sb);
struct bio *bio;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+ unsigned int max_pages;
int i;
- if (max_pages > BIO_MAX_PAGES)
- max_pages = BIO_MAX_PAGES;
+ max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+
bio = bio_alloc(GFP_NOFS, max_pages);
BUG_ON(!bio);
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index a422f42238b..6984562738d 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, logfs_i_callback);
}
+static void __logfs_destroy_meta_inode(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ BUG_ON(li->li_block);
+ call_rcu(&inode->i_rcu, logfs_i_callback);
+}
+
static void logfs_destroy_inode(struct inode *inode)
{
struct logfs_inode *li = logfs_inode(inode);
+ if (inode->i_ino < LOGFS_RESERVED_INOS) {
+ /*
+ * The reserved inodes are never destroyed unless we are in
+ * the unmount path.
+ */
+ __logfs_destroy_meta_inode(inode);
+ return;
+ }
+
BUG_ON(list_empty(&li->li_freeing_list));
spin_lock(&logfs_inode_lock);
li->li_refcount--;
@@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb)
{
struct logfs_super *super = logfs_super(sb);
/* kill the meta-inodes */
- iput(super->s_master_inode);
iput(super->s_segfile_inode);
+ iput(super->s_master_inode);
iput(super->s_mapping_inode);
}
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 1e1c369df22..2a09b8d7398 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,
index = ofs >> PAGE_SHIFT;
page_ofs = ofs & (PAGE_SIZE - 1);
- page = find_lock_page(mapping, index);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
BUG_ON(!page);
memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
unlock_page(page);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index f1cb512c501..5be0abef603 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode)
return;
}
- BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
page = inode_to_page(inode);
BUG_ON(!page); /* FIXME: Use emergency page */
logfs_put_write_page(page);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index e28d090c98d..038da099179 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
static void map_invalidatepage(struct page *page, unsigned long l)
{
- BUG();
+ return;
}
static int map_releasepage(struct page *page, gfp_t g)
diff --git a/fs/namei.c b/fs/namei.c
index 1b464390dde..dd1ed1b8e98 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)
/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
+ * @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
@@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1;
/**
* may_follow_link - Check symlink following for unsafe situations
* @link: The path of the symlink
+ * @nd: nameidata pathwalk data
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
@@ -2414,7 +2416,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
goto out;
}
- mode = op->mode & S_IALLUGO;
+ mode = op->mode;
if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
mode &= ~current_umask();
@@ -2452,7 +2454,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
}
if (open_flag & O_CREAT) {
- error = may_o_create(&nd->path, dentry, op->mode);
+ error = may_o_create(&nd->path, dentry, mode);
if (error) {
create_error = error;
if (open_flag & O_EXCL)
@@ -2489,6 +2491,10 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
dput(dentry);
dentry = file->f_path.dentry;
}
+ if (create_error && dentry->d_inode == NULL) {
+ error = create_error;
+ goto out;
+ }
goto looked_up;
}
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8bf3a3f6925..b7db60897f9 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
-obj-$(CONFIG_NFS_V2) += nfs2.o
-nfs2-y := nfs2super.o proc.o nfs2xdr.o
+obj-$(CONFIG_NFS_V2) += nfsv2.o
+nfsv2-y := nfs2super.o proc.o nfs2xdr.o
-obj-$(CONFIG_NFS_V3) += nfs3.o
-nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
-nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
+obj-$(CONFIG_NFS_V3) += nfsv3.o
+nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
+nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
-obj-$(CONFIG_NFS_V4) += nfs4.o
-nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
+obj-$(CONFIG_NFS_V4) += nfsv4.o
+nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o nfs4getroot.o nfs4client.o
-nfs4-$(CONFIG_SYSCTL) += nfs4sysctl.o
-nfs4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
+nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
+nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9fc0d9dfc91..99694442b93 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version)
if (IS_ERR(nfs)) {
mutex_lock(&nfs_version_mutex);
- request_module("nfs%d", version);
+ request_module("nfsv%d", version);
nfs = find_nfs_version(version);
mutex_unlock(&nfs_version_mutex);
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 75d6d0a3d32..6a7fcab7ecb 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -287,10 +287,12 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct inode *inode = file->f_path.dentry->d_inode;
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret != 0)
+ goto out;
mutex_lock(&inode->i_mutex);
ret = nfs_file_fsync_commit(file, start, end, datasync);
mutex_unlock(&inode->i_mutex);
-
+out:
return ret;
}
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b701358c39c..a850079467d 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -61,6 +61,12 @@ struct idmap {
struct mutex idmap_mutex;
};
+struct idmap_legacy_upcalldata {
+ struct rpc_pipe_msg pipe_msg;
+ struct idmap_msg idmap_msg;
+ struct idmap *idmap;
+};
+
/**
* nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
* @fattr: fully initialised struct nfs_fattr
@@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
name, namelen, type, data,
data_size, idmap);
+ idmap->idmap_key_cons = NULL;
mutex_unlock(&idmap->idmap_mutex);
}
return ret;
@@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = {
static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
+static void idmap_release_pipe(struct inode *);
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
static const struct rpc_pipe_ops idmap_upcall_ops = {
.upcall = rpc_pipe_generic_upcall,
.downcall = idmap_pipe_downcall,
+ .release_pipe = idmap_release_pipe,
.destroy_msg = idmap_pipe_destroy_msg,
};
@@ -616,7 +625,8 @@ void nfs_idmap_quit(void)
nfs_idmap_quit_keyring();
}
-static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im,
+static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
+ struct idmap_msg *im,
struct rpc_pipe_msg *msg)
{
substring_t substr;
@@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
const char *op,
void *aux)
{
+ struct idmap_legacy_upcalldata *data;
struct rpc_pipe_msg *msg;
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
@@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
int ret = -ENOMEM;
/* msg and im are freed in idmap_pipe_destroy_msg */
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- goto out0;
-
- im = kmalloc(sizeof(*im), GFP_KERNEL);
- if (!im)
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
goto out1;
- ret = nfs_idmap_prepare_message(key->description, im, msg);
+ msg = &data->pipe_msg;
+ im = &data->idmap_msg;
+ data->idmap = idmap;
+
+ ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
if (ret < 0)
goto out2;
@@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
- goto out2;
+ goto out3;
return ret;
+out3:
+ idmap->idmap_key_cons = NULL;
out2:
- kfree(im);
+ kfree(data);
out1:
- kfree(msg);
-out0:
complete_request_key(cons, ret);
return ret;
}
@@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
}
if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
- ret = mlen;
- complete_request_key(cons, -ENOKEY);
- goto out_incomplete;
+ ret = -ENOKEY;
+ goto out;
}
namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
@@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
out:
complete_request_key(cons, ret);
-out_incomplete:
return ret;
}
static void
idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
+ struct idmap_legacy_upcalldata *data = container_of(msg,
+ struct idmap_legacy_upcalldata,
+ pipe_msg);
+ struct idmap *idmap = data->idmap;
+ struct key_construction *cons;
+ if (msg->errno) {
+ cons = ACCESS_ONCE(idmap->idmap_key_cons);
+ idmap->idmap_key_cons = NULL;
+ complete_request_key(cons, msg->errno);
+ }
/* Free memory allocated in nfs_idmap_legacy_upcall() */
- kfree(msg->data);
- kfree(msg);
+ kfree(data);
+}
+
+static void
+idmap_release_pipe(struct inode *inode)
+{
+ struct rpc_inode *rpci = RPC_I(inode);
+ struct idmap *idmap = (struct idmap *)rpci->private;
+ idmap->idmap_key_cons = NULL;
}
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
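
The idmap rework replaces two separate kmalloc()s with one struct
idmap_legacy_upcalldata that embeds both the rpc_pipe_msg and the
idmap_msg; idmap_pipe_destroy_msg() then recovers the container from
the embedded message via container_of() and frees everything at once.
A runnable userspace equivalent of the embed-and-recover pattern (the
types are simplified stand-ins):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pipe_msg { int err; };

struct upcalldata {
	struct pipe_msg pipe_msg;	/* embedded, not allocated separately */
	char idmap_msg[64];
};

static void destroy_msg(struct pipe_msg *msg)
{
	struct upcalldata *data =
		container_of(msg, struct upcalldata, pipe_msg);

	free(data);			/* one free releases both messages */
}

int main(void)
{
	struct upcalldata *data = calloc(1, sizeof(*data));

	destroy_msg(&data->pipe_msg);
	puts("freed");
	return 0;
}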
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c6e895f0fbf..9b47610338f 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -154,7 +154,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = jiffies;
- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+ memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
else
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 0952c791df3..69322096c32 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
nfs_fattr_init(info->fattr);
status = rpc_call_sync(client, &msg, 0);
dprintk("%s: reply fsinfo: %d\n", __func__, status);
- if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
+ if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_resp = info->fattr;
status = rpc_call_sync(client, &msg, 0);
@@ -643,7 +643,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
- __be32 *verf = NFS_COOKIEVERF(dir);
+ __be32 *verf = NFS_I(dir)->cookieverf;
struct nfs3_readdirargs arg = {
.fh = NFS_FH(dir),
.cookie = cookie,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3b950dd81e8..da0618aeead 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations;
int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
unsigned, umode_t, int *);
+/* super.c */
+extern struct file_system_type nfs4_fs_type;
+
/* nfs4namespace.c */
rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index cbcdfaf3250..24eb663f8ed 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
return clp;
error:
- kfree(clp);
+ nfs_free_client(clp);
return ERR_PTR(err);
}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index acb65e7887f..eb5eb8eef4d 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -96,13 +96,15 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct inode *inode = file->f_path.dentry->d_inode;
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret != 0)
+ goto out;
mutex_lock(&inode->i_mutex);
ret = nfs_file_fsync_commit(file, start, end, datasync);
if (!ret && !datasync)
/* application has asked for meta-data sync */
ret = pnfs_layoutcommit_inode(inode, true);
mutex_unlock(&inode->i_mutex);
-
+out:
return ret;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a99a8d94872..1e50326d00d 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3215,11 +3215,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
- nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+ nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
res.pgbase = args.pgbase;
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
- memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+ memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
}
@@ -3653,11 +3653,11 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
-/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
- * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
+/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
+ * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
* the stack.
*/
-#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
@@ -3668,7 +3668,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen,
spages = pages;
do {
- len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+ len = min_t(size_t, PAGE_SIZE, buflen);
newpage = alloc_page(GFP_KERNEL);
if (newpage == NULL)
@@ -3737,9 +3737,10 @@ out:
static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
{
struct nfs4_cached_acl *acl;
+ size_t buflen = sizeof(*acl) + acl_len;
- if (pages && acl_len <= PAGE_SIZE) {
- acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
+ if (buflen <= PAGE_SIZE) {
+ acl = kmalloc(buflen, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
@@ -3781,17 +3782,15 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- int ret = -ENOMEM, npages, i;
- size_t acl_len = 0;
+ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+ int ret = -ENOMEM, i;
- npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
-
- /* Add an extra page to handle the bitmap returned */
- npages++;
+ if (npages > ARRAY_SIZE(pages))
+ return -ERANGE;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
@@ -3807,11 +3806,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
- /* Let decode_getfacl know not to fail if the ACL data is larger than
- * the page we send as a guess */
- if (buf == NULL)
- res.acl_flags |= NFS4_ACL_LEN_REQUEST;
-
dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
@@ -3819,20 +3813,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
if (ret)
goto out_free;
- acl_len = res.acl_len - res.acl_data_offset;
- if (acl_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, 0, acl_len);
- else
- nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
- acl_len);
- if (buf) {
+ /* Handle the case where the passed-in buffer is too short */
+ if (res.acl_flags & NFS4_ACL_TRUNC) {
+ /* Did the user only issue a request for the acl length? */
+ if (buf == NULL)
+ goto out_ok;
ret = -ERANGE;
- if (acl_len > buflen)
- goto out_free;
- _copy_from_pages(buf, pages, res.acl_data_offset,
- acl_len);
+ goto out_free;
}
- ret = acl_len;
+ nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
+ if (buf)
+ _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
+out_ok:
+ ret = res.acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
@@ -3890,10 +3883,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
.rpc_argp = &arg,
.rpc_resp = &res,
};
+ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
int ret, i;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
+ if (npages > ARRAY_SIZE(pages))
+ return -ERANGE;
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
if (i < 0)
return i;
@@ -6223,11 +6219,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
dprintk("<-- %s\n", __func__);
}
+static size_t max_response_pages(struct nfs_server *server)
+{
+ u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ return nfs_page_array_len(0, max_resp_sz);
+}
+
+static void nfs4_free_pages(struct page **pages, size_t size)
+{
+ int i;
+
+ if (!pages)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (!pages[i])
+ break;
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+}
+
+static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
+{
+ struct page **pages;
+ int i;
+
+ pages = kcalloc(size, sizeof(struct page *), gfp_flags);
+ if (!pages) {
+ dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
+ return NULL;
+ }
+
+ for (i = 0; i < size; i++) {
+ pages[i] = alloc_page(gfp_flags);
+ if (!pages[i]) {
+ dprintk("%s: failed to allocate page\n", __func__);
+ nfs4_free_pages(pages, size);
+ return NULL;
+ }
+ }
+
+ return pages;
+}
+
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ size_t max_pages = max_response_pages(server);
dprintk("--> %s\n", __func__);
+ nfs4_free_pages(lgp->args.layout.pages, max_pages);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@@ -6239,9 +6282,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
.rpc_release = nfs4_layoutget_release,
};
-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ size_t max_pages = max_response_pages(server);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
@@ -6259,12 +6303,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
dprintk("--> %s\n", __func__);
+ lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
+ if (!lgp->args.layout.pages) {
+ nfs4_layoutget_release(lgp);
+ return;
+ }
+ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
- return PTR_ERR(task);
+ return;
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
@@ -6272,7 +6323,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
status = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
- return status;
+ return;
}
static void
@@ -6304,12 +6355,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
return;
}
spin_lock(&lo->plh_inode->i_lock);
- if (task->tk_status == 0) {
- if (lrp->res.lrs_present) {
- pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
- } else
- BUG_ON(!list_empty(&lo->plh_segs));
- }
+ if (task->tk_status == 0 && lrp->res.lrs_present)
+ pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 12a31a9dbcd..bd61221ad2c 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data);
-static struct file_system_type nfs4_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs_fs_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
-};
-
static struct file_system_type nfs4_remote_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -344,14 +336,8 @@ static int __init init_nfs_v4(void)
if (err)
goto out1;
- err = register_filesystem(&nfs4_fs_type);
- if (err < 0)
- goto out2;
-
register_nfs_version(&nfs_v4);
return 0;
-out2:
- nfs4_unregister_sysctl();
out1:
nfs_idmap_quit();
out:
@@ -361,7 +347,6 @@ out:
static void __exit exit_nfs_v4(void)
{
unregister_nfs_version(&nfs_v4);
- unregister_filesystem(&nfs4_fs_type);
nfs4_unregister_sysctl();
nfs_idmap_quit();
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index ca13483edd6..8dba6bd4855 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
struct nfs_getaclres *res)
{
unsigned int savep;
- __be32 *bm_p;
uint32_t attrlen,
bitmap[3] = {0};
int status;
- size_t page_len = xdr->buf->page_len;
+ unsigned int pg_offset;
res->acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
- bm_p = xdr->p;
- res->acl_data_offset = be32_to_cpup(bm_p) + 2;
- res->acl_data_offset <<= 2;
- /* Check if the acl data starts beyond the allocated buffer */
- if (res->acl_data_offset > page_len)
- return -ERANGE;
+ xdr_enter_page(xdr, xdr->buf->page_len);
+
+ /* Calculate the offset of the page data */
+ pg_offset = xdr->buf->head[0].iov_len;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
@@ -5074,23 +5071,16 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
/* The bitmap (xdr len + bitmaps) and the attr xdr len words
* are stored with the acl data to handle the problem of
* variable length bitmaps.*/
- xdr->p = bm_p;
-
- /* We ignore &savep and don't do consistency checks on
- * the attr length. Let userspace figure it out.... */
- attrlen += res->acl_data_offset;
- if (attrlen > page_len) {
- if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
- /* getxattr interface called with a NULL buf */
- res->acl_len = attrlen;
- goto out;
- }
- dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
- attrlen, page_len);
- return -EINVAL;
- }
- xdr_read_pages(xdr, attrlen);
+ res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
res->acl_len = attrlen;
+
+ /* Check for receive buffer overflow */
+ if (res->acl_len > (xdr->nwords << 2) ||
+ res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
+ res->acl_flags |= NFS4_ACL_TRUNC;
+ dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
+ attrlen, xdr->nwords << 2);
+ }
} else
status = -EOPNOTSUPP;
@@ -6235,7 +6225,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_open(xdr, res);
if (status)
goto out;
- if (decode_getfh(xdr, &res->fh) != 0)
+ status = decode_getfh(xdr, &res->fh);
+ if (status)
goto out;
decode_getfattr(xdr, res->f_attr, res->server);
out:
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index f50d3e8d6f2..ea6d111b03e 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
return false;
return pgio->pg_count + req->wb_bytes <=
- OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+ (unsigned long)pgio->pg_layout_private;
+}
+
+void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ pnfs_generic_pg_init_read(pgio, req);
+ if (unlikely(pgio->pg_lseg == NULL))
+ return; /* Not pNFS */
+
+ pgio->pg_layout_private = (void *)
+ OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+}
+
+static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
+ unsigned long *stripe_end)
+{
+ u32 stripe_off;
+ unsigned stripe_size;
+
+ if (layout->raid_algorithm == PNFS_OSD_RAID_0)
+ return true;
+
+ stripe_size = layout->stripe_unit *
+ (layout->group_width - layout->parity);
+
+ div_u64_rem(offset, stripe_size, &stripe_off);
+ if (!stripe_off)
+ return true;
+
+ *stripe_end = stripe_size - stripe_off;
+ return false;
+}
+
+void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ unsigned long stripe_end = 0;
+
+ pnfs_generic_pg_init_write(pgio, req);
+ if (unlikely(pgio->pg_lseg == NULL))
+ return; /* Not pNFS */
+
+ if (req->wb_offset ||
+ !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE,
+ &OBJIO_LSEG(pgio->pg_lseg)->layout,
+ &stripe_end)) {
+ pgio->pg_layout_private = (void *)stripe_end;
+ } else {
+ pgio->pg_layout_private = (void *)
+ OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+ }
}
static const struct nfs_pageio_ops objio_pg_read_ops = {
- .pg_init = pnfs_generic_pg_init_read,
+ .pg_init = objio_init_read,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops objio_pg_write_ops = {
- .pg_init = pnfs_generic_pg_init_write,
+ .pg_init = objio_init_write,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
};
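
In the objio_osd.c hunk, objio_init_write() caps an I/O at the end of the current RAID stripe when the start offset is unaligned: stripe_size is stripe_unit * (group_width - parity), and the remainder of offset / stripe_size gives the distance into the stripe. A standalone sketch of that computation; the layout numbers are hypothetical and div_u64_rem() is re-implemented for userspace:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace stand-in for the kernel's div_u64_rem(). */
    static uint64_t div_u64_rem(uint64_t n, uint32_t base, uint32_t *rem)
    {
        *rem = n % base;
        return n / base;
    }

    int main(void)
    {
        /* Hypothetical RAID5-like layout: 64KiB stripe unit,
         * 4 devices in the group, 1 of them parity. */
        uint32_t stripe_unit = 64 * 1024, group_width = 4, parity = 1;
        uint32_t stripe_size = stripe_unit * (group_width - parity);
        uint64_t offset = 200 * 1024;   /* I/O start, in bytes */
        uint32_t stripe_off;

        div_u64_rem(offset, stripe_size, &stripe_off);
        if (stripe_off)
            printf("unaligned: cap this I/O at %u bytes\n",
                   stripe_size - stripe_off);
        else
            printf("aligned on a full stripe\n");
        return 0;
    }
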
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1a6732ed04a..311a79681e2 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
hdr->io_start = req_offset(hdr->req);
hdr->good_bytes = desc->pg_count;
hdr->dreq = desc->pg_dreq;
+ hdr->layout_private = desc->pg_layout_private;
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
@@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_error = 0;
desc->pg_lseg = NULL;
desc->pg_dreq = NULL;
+ desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 76875bfcf19..2e00feacd4b 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
struct pnfs_layout_segment *lseg = NULL;
- struct page **pages = NULL;
- int i;
- u32 max_resp_sz, max_pages;
dprintk("--> %s\n", __func__);
@@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
if (lgp == NULL)
return NULL;
- /* allocate pages for xdr post processing */
- max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
- max_pages = nfs_page_array_len(0, max_resp_sz);
-
- pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
- if (!pages)
- goto out_err_free;
-
- for (i = 0; i < max_pages; i++) {
- pages[i] = alloc_page(gfp_flags);
- if (!pages[i])
- goto out_err_free;
- }
-
lgp->args.minlength = PAGE_CACHE_SIZE;
if (lgp->args.minlength > range->length)
lgp->args.minlength = range->length;
@@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp->args.type = server->pnfs_curr_ld->id;
lgp->args.inode = ino;
lgp->args.ctx = get_nfs_open_context(ctx);
- lgp->args.layout.pages = pages;
- lgp->args.layout.pglen = max_pages * PAGE_SIZE;
lgp->lsegpp = &lseg;
lgp->gfp_flags = gfp_flags;
/* Synchronously retrieve layout information from server and
* store in lseg.
*/
- nfs4_proc_layoutget(lgp);
+ nfs4_proc_layoutget(lgp, gfp_flags);
if (!lseg) {
/* remember that LAYOUTGET failed and suspend trying */
set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
}
- /* free xdr pages */
- for (i = 0; i < max_pages; i++)
- __free_page(pages[i]);
- kfree(pages);
-
return lseg;
-
-out_err_free:
- /* free any allocated xdr pages, lgp as it's not used */
- if (pages) {
- for (i = 0; i < max_pages; i++) {
- if (!pages[i])
- break;
- __free_page(pages[i]);
- }
- kfree(pages);
- }
- kfree(lgp);
- return NULL;
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2c6c80503ba..745aa1b39e7 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
struct pnfs_devicelist *devlist);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev);
-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
/* pnfs.c */
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ac6a3c55dce..b8eda700584 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops);
static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
static int nfs4_validate_mount_data(void *options,
struct nfs_parsed_mount_data *args, const char *dev_name);
+
+struct file_system_type nfs4_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs_fs_mount,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+EXPORT_SYMBOL_GPL(nfs4_fs_type);
+
+static int __init register_nfs4_fs(void)
+{
+ return register_filesystem(&nfs4_fs_type);
+}
+
+static void unregister_nfs4_fs(void)
+{
+ unregister_filesystem(&nfs4_fs_type);
+}
+#else
+static int __init register_nfs4_fs(void)
+{
+ return 0;
+}
+
+static void unregister_nfs4_fs(void)
+{
+}
#endif
static struct shrinker acl_shrinker = {
@@ -337,12 +365,18 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_0;
- ret = nfs_register_sysctl();
+ ret = register_nfs4_fs();
if (ret < 0)
goto error_1;
+
+ ret = nfs_register_sysctl();
+ if (ret < 0)
+ goto error_2;
register_shrinker(&acl_shrinker);
return 0;
+error_2:
+ unregister_nfs4_fs();
error_1:
unregister_filesystem(&nfs_fs_type);
error_0:
@@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void)
{
unregister_shrinker(&acl_shrinker);
nfs_unregister_sysctl();
+ unregister_nfs4_fs();
unregister_filesystem(&nfs_fs_type);
}
@@ -1832,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
+ args->nfs_server.port = ntohs(data->addr.sin_port);
if (!nfs_verify_server_address(sap))
goto out_no_address;
@@ -2529,6 +2565,7 @@ static int nfs4_validate_mount_data(void *options,
return -EFAULT;
if (!nfs_verify_server_address(sap))
goto out_no_address;
+ args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
if (data->auth_flavourlen) {
if (data->auth_flavourlen > 1)
@@ -2645,4 +2682,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
module_param(send_implementation_id, ushort, 0644);
MODULE_PARM_DESC(send_implementation_id,
"Send implementation ID with NFSv4.1 exchange_id");
+MODULE_ALIAS("nfs4");
+
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5829d0ce7cf..e3b55372726 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1814,19 +1814,19 @@ int __init nfs_init_writepagecache(void)
nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
nfs_wdata_cachep);
if (nfs_wdata_mempool == NULL)
- return -ENOMEM;
+ goto out_destroy_write_cache;
nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
sizeof(struct nfs_commit_data),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_cdata_cachep == NULL)
- return -ENOMEM;
+ goto out_destroy_write_mempool;
nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
- return -ENOMEM;
+ goto out_destroy_commit_cache;
/*
* NFS congestion size, scale with available memory.
@@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void)
nfs_congestion_kb = 256*1024;
return 0;
+
+out_destroy_commit_cache:
+ kmem_cache_destroy(nfs_cdata_cachep);
+out_destroy_write_mempool:
+ mempool_destroy(nfs_wdata_mempool);
+out_destroy_write_cache:
+ kmem_cache_destroy(nfs_wdata_cachep);
+ return -ENOMEM;
}
void nfs_destroy_writepagecache(void)
{
mempool_destroy(nfs_commit_mempool);
+ kmem_cache_destroy(nfs_cdata_cachep);
mempool_destroy(nfs_wdata_mempool);
kmem_cache_destroy(nfs_wdata_cachep);
}
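
The write.c hunk converts three bare "return -ENOMEM" statements into the usual goto ladder, so each failure unwinds exactly what was created before it (and the destroy path now also frees nfs_cdata_cachep). A generic, runnable sketch of the pattern, with malloc() standing in for the cache/mempool constructors:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *a = NULL, *b = NULL, *c = NULL;

        a = malloc(32);
        if (!a)
            goto out;
        b = malloc(32);
        if (!b)
            goto out_free_a;
        c = malloc(32);
        if (!c)
            goto out_free_b;

        puts("all three allocated");
        free(c);
        free(b);
        free(a);
        return 0;

    out_free_b:     /* unwind in the reverse order of construction */
        free(b);
    out_free_a:
        free(a);
    out:
        return 1;
    }
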
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index cbaf4f8bb7b..4c7bd35b187 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
- (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+ (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
return -EINVAL;
args.client_name = clp->cl_cred.cr_principal;
args.prognumber = conn->cb_prog,
args.protocol = XPRT_TRANSPORT_TCP;
- args.authflavor = clp->cl_flavor;
+ args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
if (!conn->cb_xprt)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e6173147f98..22bd0a66c35 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -231,7 +231,6 @@ struct nfs4_client {
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
struct sockaddr_storage cl_addr; /* client ipaddress */
- u32 cl_flavor; /* setclientid pseudoflavor */
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 6522cac6057..6a10812711c 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -676,17 +676,13 @@ static const struct super_operations nilfs_sops = {
.alloc_inode = nilfs_alloc_inode,
.destroy_inode = nilfs_destroy_inode,
.dirty_inode = nilfs_dirty_inode,
- /* .write_inode = nilfs_write_inode, */
- /* .drop_inode = nilfs_drop_inode, */
.evict_inode = nilfs_evict_inode,
.put_super = nilfs_put_super,
- /* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
.freeze_fs = nilfs_freeze,
.unfreeze_fs = nilfs_unfreeze,
.statfs = nilfs_statfs,
.remount_fs = nilfs_remount,
- /* .umount_begin */
.show_options = nilfs_show_options
};
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 6eee4177807..be1267a34ce 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -107,8 +107,6 @@ struct the_nilfs {
* used for
* - loading the latest checkpoint exclusively.
* - allocating a new full segment.
- * - protecting s_dirt in the super_block struct
- * (see nilfs_write_super) and the following fields.
*/
struct buffer_head *ns_sbh[2];
struct nilfs_super_block *ns_sbp[2];
diff --git a/fs/open.c b/fs/open.c
index f3d96e7e7b1..e1f2cdb91a4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -717,7 +717,7 @@ cleanup_all:
* here, so just reset the state.
*/
file_reset_write(f);
- mnt_drop_write(f->f_path.mnt);
+ __mnt_drop_write(f->f_path.mnt);
}
}
cleanup_file:
@@ -852,9 +852,10 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode;
- if (!(flags & O_CREAT))
- mode = 0;
- op->mode = mode;
+ if (flags & O_CREAT)
+ op->mode = (mode & S_IALLUGO) | S_IFREG;
+ else
+ op->mode = 0;
/* Must never be set by userspace */
flags &= ~FMODE_NONOTIFY;
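
With the open.c hunk, build_open_flags() masks the caller-supplied mode down to the permission bits and ORs in S_IFREG up front. A small demo of the masking; S_IALLUGO is a kernel-internal macro, re-derived here from the standard mode bits:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Kernel-internal name; equals 07777 (permission + suid/sgid/sticky bits). */
    #ifndef S_IALLUGO
    #define S_IALLUGO (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)
    #endif

    int main(void)
    {
        mode_t mode = 0300640;  /* stray file-type bits smuggled in by a caller */
        mode_t sane = (mode & S_IALLUGO) | S_IFREG;

        printf("%#o -> %#o\n", (unsigned)mode, (unsigned)sane); /* -> 0100640 */
        return 0;
    }
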
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 36a29b753c7..c495a3055e2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
goto out;
}
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!dquots[cnt])
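
The dquot.c hunk moves the warn[] initialization ahead of down_read(), shrinking the critical section: the array is purely local, so nothing is gained by holding dqptr_sem while filling it. A minimal pthreads sketch of the same reordering; MAXQUOTAS and QUOTA_NL_NOWARN are stand-in definitions mirroring the hunk:

    #include <pthread.h>
    #include <stdio.h>

    #define MAXQUOTAS       3
    #define QUOTA_NL_NOWARN 0

    struct warn_item { int w_type; };

    static pthread_rwlock_t dqptr_sem = PTHREAD_RWLOCK_INITIALIZER;

    int main(void)
    {
        struct warn_item warn[MAXQUOTAS];
        int cnt;

        /* Purely local setup: doing it before the lock keeps the
         * critical section down to the shared-state accesses. */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
            warn[cnt].w_type = QUOTA_NL_NOWARN;

        pthread_rwlock_rdlock(&dqptr_sem);
        /* ... shared quota bookkeeping would happen here ... */
        pthread_rwlock_unlock(&dqptr_sem);

        printf("warn[0].w_type = %d\n", warn[0].w_type);
        return 0;
    }
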
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4c0c7d163d1..a98b7740a0f 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
else if (bitmap == 0)
block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
- reiserfs_write_unlock(sb);
bh = sb_bread(sb, block);
- reiserfs_write_lock(sb);
if (bh == NULL)
reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
"reading failed", __func__, block);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a6d4268fb6c..855da58db14 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode)
;
}
out:
+ reiserfs_write_unlock_once(inode->i_sb, depth);
clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
dquot_drop(inode);
inode->i_blocks = 0;
- reiserfs_write_unlock_once(inode->i_sb, depth);
return;
no_delete:
diff --git a/fs/stat.c b/fs/stat.c
index b6ff11825fc..40780229a03 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(vfs_getattr);
int vfs_fstat(unsigned int fd, struct kstat *stat)
{
int fput_needed;
- struct file *f = fget_light(fd, &fput_needed);
+ struct file *f = fget_raw_light(fd, &fput_needed);
int error = -EBADF;
if (f) {
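
Switching vfs_fstat() to fget_raw_light() lets it accept O_PATH descriptors. A userspace demo of the behavior this enables; on kernels without the change, the fstat() call is expected to fail with EBADF:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        struct stat st;
        int fd = open("/", O_PATH | O_CLOEXEC);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (fstat(fd, &st) == 0)    /* works once vfs_fstat() uses fget_raw_light() */
            printf("st_ino = %llu\n", (unsigned long long)st.st_ino);
        else
            perror("fstat");        /* EBADF on older kernels */
        close(fd);
        return 0;
    }
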
diff --git a/fs/super.c b/fs/super.c
index b05cf47463d..0902cfa6a12 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -537,46 +537,6 @@ void drop_super(struct super_block *sb)
EXPORT_SYMBOL(drop_super);
/**
- * sync_supers - helper for periodic superblock writeback
- *
- * Call the write_super method if present on all dirty superblocks in
- * the system. This is for the periodic writeback used by most older
- * filesystems. For data integrity superblock writeback use
- * sync_filesystems() instead.
- *
- * Note: check the dirty flag before waiting, so we don't
- * hold up the sync while mounting a device. (The newly
- * mounted device won't need syncing.)
- */
-void sync_supers(void)
-{
- struct super_block *sb, *p = NULL;
-
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
- if (sb->s_op->write_super && sb->s_dirt) {
- sb->s_count++;
- spin_unlock(&sb_lock);
-
- down_read(&sb->s_umount);
- if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
- sb->s_op->write_super(sb);
- up_read(&sb->s_umount);
-
- spin_lock(&sb_lock);
- if (p)
- __put_super(p);
- p = sb;
- }
- }
- if (p)
- __put_super(p);
- spin_unlock(&sb_lock);
-}
-
-/**
* iterate_supers - call function for all active superblocks
* @f: function to call
* @arg: argument to pass to it
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 8b8cc4e945f..760de723dad 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -167,7 +167,7 @@ struct ubifs_global_debug_info {
#define ubifs_dbg_msg(type, fmt, ...) \
pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
-#define DBG_KEY_BUF_LEN 32
+#define DBG_KEY_BUF_LEN 48
#define ubifs_dbg_msg_key(type, key, fmt, ...) do { \
char __tmp_key_buf[DBG_KEY_BUF_LEN]; \
pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 35389ca2d26..7bd6e72afd1 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -37,11 +37,11 @@
*
* A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
* implement. However, this is not true for 'ubifs_writepage()', which may be
- * called with @i_mutex unlocked. For example, when pdflush is doing background
- * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
- * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
- * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
- * we are only guaranteed that the page is locked.
+ * called with @i_mutex unlocked. For example, when flusher thread is doing
+ * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
+ * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
+ * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
+ * 'ubifs_writepage()' we are only guaranteed that the page is locked.
*
* Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
* read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index ce33b2beb15..8640920766e 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
return 0;
out_err:
- ubifs_lpt_free(c, 0);
+ if (wr)
+ ubifs_lpt_free(c, 1);
+ if (rd)
+ ubifs_lpt_free(c, 0);
return err;
}
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index c30d976b4be..edeec499c04 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
corrupted_rescan:
/* Re-scan the corrupted data with verbose messages */
- ubifs_err("corruptio %d", ret);
+ ubifs_err("corruption %d", ret);
ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
corrupted:
ubifs_scanned_corruption(c, lnum, offs, buf);
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index eba46d4a761..94d78fc5d4e 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
c->replaying = 1;
lnum = c->ltail_lnum = c->lhead_lnum;
- lnum = UBIFS_LOG_LNUM;
do {
err = replay_log_leb(c, lnum, 0, c->sbuf);
if (err == 1)
@@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
if (err)
goto out;
lnum = ubifs_next_log_lnum(c, lnum);
- } while (lnum != UBIFS_LOG_LNUM);
+ } while (lnum != c->ltail_lnum);
err = replay_buds(c);
if (err)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1c766c39c03..71a197f0f93 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
mutex_lock(&ui->ui_mutex);
/*
* Due to races between write-back forced by budgeting
- * (see 'sync_some_inodes()') and pdflush write-back, the inode may
+ * (see 'sync_some_inodes()') and background write-back, the inode may
* have already been synchronized, do not do this again. This might
* also happen if it was synchronized in an VFS operation, e.g.
* 'ubifs_link()'.
@@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c)
*
* This function mounts UBIFS file system. Returns zero in case of success and
* a negative error code in case of failure.
- *
- * Note, the function does not de-allocate resources it it fails half way
- * through, and the caller has to do this instead.
*/
static int mount_ubifs(struct ubifs_info *c)
{
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 7f3f7ba3df6..d1c6093fd3d 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -39,20 +39,24 @@
#include "udf_i.h"
#include "udf_sb.h"
-static int udf_adinicb_readpage(struct file *file, struct page *page)
+static void __udf_adinicb_readpage(struct page *page)
{
struct inode *inode = page->mapping->host;
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
- BUG_ON(!PageLocked(page));
-
kaddr = kmap(page);
- memset(kaddr, 0, PAGE_CACHE_SIZE);
memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
+ memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
flush_dcache_page(page);
SetPageUptodate(page);
kunmap(page);
+}
+
+static int udf_adinicb_readpage(struct file *file, struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+ __udf_adinicb_readpage(page);
unlock_page(page);
return 0;
@@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page,
return 0;
}
+static int udf_adinicb_write_begin(struct file *file,
+ struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned flags, struct page **pagep,
+ void **fsdata)
+{
+ struct page *page;
+
+ if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+ return -EIO;
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+
+ if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+ __udf_adinicb_readpage(page);
+ return 0;
+}
+
static int udf_adinicb_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file,
const struct address_space_operations udf_adinicb_aops = {
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
- .write_begin = simple_write_begin,
- .write_end = udf_adinicb_write_end,
+ .write_begin = udf_adinicb_write_begin,
+ .write_end = udf_adinicb_write_end,
};
static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
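
The new udf_adinicb_write_begin() above turns a partial write to an in-ICB file into read-modify-write: unless the write covers the whole page, the existing inline data is copied in first and the tail zeroed, so write_end can copy back a complete page image. A standalone sketch of that pre-fill step; PAGE_CACHE_SIZE and the sample data are made up:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_CACHE_SIZE 4096

    /* Pre-fill a page buffer from inline data of length isize, zeroing the
     * tail -- the same job __udf_adinicb_readpage() does in the hunk above. */
    static void prefill(char *page, const char *inline_data, size_t isize)
    {
        memcpy(page, inline_data, isize);
        memset(page + isize, 0, PAGE_CACHE_SIZE - isize);
    }

    int main(void)
    {
        static char page[PAGE_CACHE_SIZE];
        const char data[] = "hello, inline file";

        prefill(page, data, sizeof(data) - 1);
        memcpy(page + 7, "ICB", 3);     /* a partial write lands on the page */
        printf("%s\n", page);
        return 0;
    }
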
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fafaad795cd..aa233469b3c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize)
if (err)
return err;
down_write(&iinfo->i_data_sem);
- } else
+ } else {
iinfo->i_lenAlloc = newsize;
+ goto set_size;
+ }
}
err = udf_extend_file(inode, newsize);
if (err) {
up_write(&iinfo->i_data_sem);
return err;
}
+set_size:
truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
} else {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index dcbf98722af..18fc038a438 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
udf_err(sb, "error loading logical volume descriptor: "
"Partition table too long (%u > %lu)\n", table_len,
sb->s_blocksize - sizeof(*lvd));
+ ret = 1;
goto out_bh;
}
@@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
if (udf_load_sparable_map(sb, map,
- (struct sparablePartitionMap *)gpm) < 0)
+ (struct sparablePartitionMap *)gpm) < 0) {
+ ret = 1;
goto out_bh;
+ }
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_METADATA,
strlen(UDF_ID_METADATA))) {
@@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!silent)
pr_notice("Rescanning with blocksize %d\n",
UDF_DEFAULT_BLOCKSIZE);
+ brelse(sbi->s_lvid_bh);
+ sbi->s_lvid_bh = NULL;
uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
}
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index f9c3fe304a1..69cf4fcde03 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -179,12 +179,14 @@ xfs_ioc_trim(
* used by the fstrim application. In the end it really doesn't
* matter as trimming blocks is an advisory interface.
*/
+ if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
+ range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
+ return -XFS_ERROR(EINVAL);
+
start = BTOBB(range.start);
end = start + BTOBBT(range.len) - 1;
minlen = BTOBB(max_t(u64, granularity, range.minlen));
- if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
- return -XFS_ERROR(EINVAL);
if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 21e37b55f7e..5aceb3f8ecd 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -962,23 +962,22 @@ xfs_dialloc(
if (!pag->pagi_freecount && !okalloc)
goto nextag;
+ /*
+ * Then read in the AGI buffer and recheck with the AGI buffer
+ * lock held.
+ */
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
if (error)
goto out_error;
- /*
- * Once the AGI has been read in we have to recheck
- * pagi_freecount with the AGI buffer lock held.
- */
if (pag->pagi_freecount) {
xfs_perag_put(pag);
goto out_alloc;
}
- if (!okalloc) {
- xfs_trans_brelse(tp, agbp);
- goto nextag;
- }
+ if (!okalloc)
+ goto nextag_relse_buffer;
+
error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
if (error) {
@@ -1007,6 +1006,8 @@ xfs_dialloc(
return 0;
}
+nextag_relse_buffer:
+ xfs_trans_brelse(tp, agbp);
nextag:
xfs_perag_put(pag);
if (++agno == mp->m_sb.sb_agcount)
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 92d4331cd4f..ca28a4ba4b5 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -857,7 +857,7 @@ xfs_rtbuf_get(
xfs_buf_t *bp; /* block buffer, result */
xfs_inode_t *ip; /* bitmap or summary inode */
xfs_bmbt_irec_t map;
- int nmap;
+ int nmap = 1;
int error; /* error value */
ip = issum ? mp->m_rsumip : mp->m_rbmip;