Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/cache.c               |  14
-rw-r--r--  fs/Kconfig                  |   2
-rw-r--r--  fs/afs/file.c               |  15
-rw-r--r--  fs/bio.c                    |  28
-rw-r--r--  fs/btrfs/extent-tree.c      | 113
-rw-r--r--  fs/btrfs/extent_map.c       |   2
-rw-r--r--  fs/btrfs/free-space-cache.c |   2
-rw-r--r--  fs/btrfs/inode.c            |  95
-rw-r--r--  fs/btrfs/root-tree.c        |   2
-rw-r--r--  fs/btrfs/transaction.c      |  19
-rw-r--r--  fs/cachefiles/interface.c   |  32
-rw-r--r--  fs/cachefiles/namei.c       | 187
-rw-r--r--  fs/cachefiles/rdwr.c        | 130
-rw-r--r--  fs/cifs/CHANGES             |   9
-rw-r--r--  fs/cifs/cifsfs.c            |   2
-rw-r--r--  fs/cifs/cifsproto.h         |   1
-rw-r--r--  fs/cifs/connect.c           |   8
-rw-r--r--  fs/cifs/dir.c               |   8
-rw-r--r--  fs/cifs/inode.c             |   7
-rw-r--r--  fs/cifs/misc.c              |  14
-rw-r--r--  fs/cifs/readdir.c           |   7
-rw-r--r--  fs/compat.c                 |   2
-rw-r--r--  fs/compat_ioctl.c           |   4
-rw-r--r--  fs/exec.c                   |  12
-rw-r--r--  fs/ext3/fsync.c             |  36
-rw-r--r--  fs/ext3/inode.c             |  36
-rw-r--r--  fs/ext3/super.c             |   2
-rw-r--r--  fs/ext4/ext4.h              |   1
-rw-r--r--  fs/ext4/extents.c           |  36
-rw-r--r--  fs/ext4/inode.c             |  24
-rw-r--r--  fs/ext4/namei.c             |  16
-rw-r--r--  fs/fcntl.c                  |   4
-rw-r--r--  fs/file_table.c             |   2
-rw-r--r--  fs/fscache/Kconfig          |   7
-rw-r--r--  fs/fscache/Makefile         |   1
-rw-r--r--  fs/fscache/cache.c          |   5
-rw-r--r--  fs/fscache/cookie.c         |  26
-rw-r--r--  fs/fscache/internal.h       |  56
-rw-r--r--  fs/fscache/main.c           |   6
-rw-r--r--  fs/fscache/object-list.c    | 432
-rw-r--r--  fs/fscache/object.c         | 104
-rw-r--r--  fs/fscache/operation.c      | 120
-rw-r--r--  fs/fscache/page.c           | 273
-rw-r--r--  fs/fscache/proc.c           |  13
-rw-r--r--  fs/fscache/stats.c          |  94
-rw-r--r--  fs/fuse/dir.c               |   7
-rw-r--r--  fs/fuse/file.c              |   5
-rw-r--r--  fs/gfs2/Kconfig             |   2
-rw-r--r--  fs/gfs2/acl.c               | 357
-rw-r--r--  fs/gfs2/acl.h               |  24
-rw-r--r--  fs/gfs2/aops.c              |  20
-rw-r--r--  fs/gfs2/dir.c               |  34
-rw-r--r--  fs/gfs2/glock.c             |  31
-rw-r--r--  fs/gfs2/glock.h             |   9
-rw-r--r--  fs/gfs2/glops.c             |   5
-rw-r--r--  fs/gfs2/incore.h            |   5
-rw-r--r--  fs/gfs2/inode.c             |   4
-rw-r--r--  fs/gfs2/log.c               |   2
-rw-r--r--  fs/gfs2/lops.c              |   4
-rw-r--r--  fs/gfs2/main.c              |   4
-rw-r--r--  fs/gfs2/ops_fstype.c        | 154
-rw-r--r--  fs/gfs2/quota.c             | 393
-rw-r--r--  fs/gfs2/quota.h             |   5
-rw-r--r--  fs/gfs2/recovery.c          |   4
-rw-r--r--  fs/gfs2/rgrp.c              |  14
-rw-r--r--  fs/gfs2/super.c             | 110
-rw-r--r--  fs/gfs2/super.h             |   4
-rw-r--r--  fs/gfs2/sys.c               |  14
-rw-r--r--  fs/gfs2/xattr.c             |  74
-rw-r--r--  fs/gfs2/xattr.h             |   8
-rw-r--r--  fs/inode.c                  |  10
-rw-r--r--  fs/ioctl.c                  |   2
-rw-r--r--  fs/jbd/journal.c            |   3
-rw-r--r--  fs/jbd2/journal.c           |   2
-rw-r--r--  fs/jffs2/read.c             |   9
-rw-r--r--  fs/namespace.c              |  20
-rw-r--r--  fs/nfs/fscache.c            |  10
-rw-r--r--  fs/nfs/nfs4proc.c           |   2
-rw-r--r--  fs/nfsd/nfs3xdr.c           |   2
-rw-r--r--  fs/nilfs2/btnode.c          |   4
-rw-r--r--  fs/nilfs2/cpfile.c          |   2
-rw-r--r--  fs/nilfs2/inode.c           |   1
-rw-r--r--  fs/nilfs2/ioctl.c           |  39
-rw-r--r--  fs/nilfs2/segment.c         |  17
-rw-r--r--  fs/ocfs2/file.c             |   3
-rw-r--r--  fs/ocfs2/ocfs2.h            |   7
-rw-r--r--  fs/ocfs2/refcounttree.c     |  69
-rw-r--r--  fs/ocfs2/super.c            |  20
-rw-r--r--  fs/ocfs2/uptodate.c         |   5
-rw-r--r--  fs/open.c                   |  27
-rw-r--r--  fs/proc/array.c             |  25
-rw-r--r--  fs/proc/base.c              |   3
-rw-r--r--  fs/proc/stat.c              |  19
-rw-r--r--  fs/quota/Kconfig            |   2
-rw-r--r--  fs/quota/dquot.c            |  93
-rw-r--r--  fs/quota/quota.c            |  93
-rw-r--r--  fs/sysfs/dir.c              |   4
-rw-r--r--  fs/xattr_acl.c              |   2
-rw-r--r--  fs/xfs/xfs_log_recover.c    |   4
-rw-r--r--  fs/xfs/xfs_trans_ail.c      |  23
100 files changed, 2845 insertions, 979 deletions
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 51c94e26a34..e777961939f 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
BUG_ON(!vcookie->fscache);
- if (PageFsCache(page)) {
- if (fscache_check_page_write(vcookie->fscache, page)) {
- if (!(gfp & __GFP_WAIT))
- return 0;
- fscache_wait_on_page_write(vcookie->fscache, page);
- }
-
- fscache_uncache_page(vcookie->fscache, page);
- ClearPageFsCache(page);
- }
-
- return 1;
+ return fscache_maybe_release_page(vcookie->fscache, page, gfp);
}
void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
fscache_wait_on_page_write(vcookie->fscache, page);
BUG_ON(!PageLocked(page));
fscache_uncache_page(vcookie->fscache, page);
- ClearPageFsCache(page);
}
}
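
This hunk and the fs/afs/file.c hunk below fold the same open-coded release sequence into the new fscache_maybe_release_page() helper (fs/fscache/page.c grows by 273 lines in this merge, per the diffstat). A minimal sketch of the shape that helper presumably has, reconstructed only from the code being removed here; the real implementation may differ in detail:

	bool fscache_maybe_release_page(struct fscache_cookie *cookie,
					struct page *page, gfp_t gfp)
	{
		if (PageFsCache(page)) {
			if (fscache_check_page_write(cookie, page)) {
				/* page is still being written to the cache */
				if (!(gfp & __GFP_WAIT))
					return false;	/* caller cannot sleep: deny release */
				fscache_wait_on_page_write(cookie, page);
			}
			/* uncaching now appears to clear PG_fscache itself, which
			 * is why the explicit ClearPageFsCache() calls are dropped
			 * in both hunks above */
			fscache_uncache_page(cookie, page);
		}
		return true;	/* page may be released */
	}

Note the return sense: nonzero means the page may be released, so 9p returns the value straight from its releasepage hook, and AFS below bails out with 0 when the helper returns false.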
diff --git a/fs/Kconfig b/fs/Kconfig
index 2126078a38e..64d44efad7a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -135,7 +135,7 @@ config TMPFS_POSIX_ACL
config HUGETLBFS
bool "HugeTLB file system support"
- depends on X86 || IA64 || PPC_BOOK3S_64 || SPARC64 || (S390 && 64BIT) || \
+ depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
SYS_SUPPORTS_HUGETLBFS || BROKEN
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 681c2a7b013..39b301662f2 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
fscache_wait_on_page_write(vnode->cache, page);
fscache_uncache_page(vnode->cache, page);
- ClearPageFsCache(page);
}
#endif
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page)) {
- if (fscache_check_page_write(vnode->cache, page)) {
- if (!(gfp_flags & __GFP_WAIT)) {
- _leave(" = F [cache busy]");
- return 0;
- }
- fscache_wait_on_page_write(vnode->cache, page);
- }
-
- fscache_uncache_page(vnode->cache, page);
- ClearPageFsCache(page);
+ if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
+ _leave(" = F [cache busy]");
+ return 0;
}
#endif
diff --git a/fs/bio.c b/fs/bio.c
index 402cb84a92a..12da5db8682 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -325,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
* @gfp_mask: allocation mask to use
* @nr_iovecs: number of iovecs
*
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
*
* RETURNS:
* Pointer to new bio on success, NULL on failure.
@@ -350,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
}
/**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
*
* Description:
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
+ * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
+ * %__GFP_WAIT, the allocation is guaranteed to succeed.
*
**/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
*
* Description:
* Put a reference to a &struct bio, either one you have gotten with
- * bio_alloc or bio_get. The last put of a bio will free it.
+ * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
**/
void bio_put(struct bio *bio)
{
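The documentation block moved onto bio_alloc() above encodes a real usage contract: the mempool only guarantees forward progress if each caller holds at most one unsubmitted bio from fs_bio_set at a time. A sketch of the pattern the comment mandates (device setup and page wiring elided; nr_vecs is a placeholder):

	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs); /* __GFP_WAIT set: cannot fail */

	/* ... set bi_bdev/bi_sector, add pages with bio_add_page() ... */

	submit_bio(WRITE, bio);		/* hand the bio onward before allocating again */

	/* only now is a second pool allocation safe; holding two unsubmitted
	 * bios from the same mempool can livelock under memory pressure */
	bio = bio_alloc(GFP_NOIO, nr_vecs);

Code that genuinely needs several bios in hand at once can use bio_kmalloc() instead, which allocates from the slab rather than the shared mempool.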
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e238a0cdac6..94627c4cc19 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2977,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
free_space = btrfs_super_total_bytes(disk_super);
/*
- * we allow the metadata to grow to a max of either 5gb or 5% of the
+ * we allow the metadata to grow to a max of either 10gb or 5% of the
* space in the volume.
*/
- min_metadata = min((u64)5 * 1024 * 1024 * 1024,
+ min_metadata = min((u64)10 * 1024 * 1024 * 1024,
div64_u64(free_space * 5, 100));
if (info->total_bytes >= min_metadata) {
spin_unlock(&info->lock);
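
The new bound works out to min(10GB, 5% of the volume), so the doubling only matters on larger filesystems. A hypothetical helper, purely to make the arithmetic concrete (max_meta() is not a function in the patch):

	u64 max_meta(u64 volume_bytes)
	{
		/* metadata may grow to 10GB or 5% of the volume,
		 * whichever is smaller */
		return min((u64)10 * 1024 * 1024 * 1024,
			   div64_u64(volume_bytes * 5, 100));
	}

	/* 100GB volume -> 5GB (unchanged); 200GB -> 10GB; 1TB -> 10GB,
	 * where the old 5GB cap would previously have applied */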
@@ -4102,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
}
enum btrfs_loop_type {
- LOOP_CACHED_ONLY = 0,
+ LOOP_FIND_IDEAL = 0,
LOOP_CACHING_NOWAIT = 1,
LOOP_CACHING_WAIT = 2,
LOOP_ALLOC_CHUNK = 3,
@@ -4131,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group = NULL;
int empty_cluster = 2 * 1024 * 1024;
int allowed_chunk_alloc = 0;
+ int done_chunk_alloc = 0;
struct btrfs_space_info *space_info;
int last_ptr_loop = 0;
int loop = 0;
bool found_uncached_bg = false;
bool failed_cluster_refill = false;
bool failed_alloc = false;
+ u64 ideal_cache_percent = 0;
+ u64 ideal_cache_offset = 0;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4172,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
empty_cluster = 0;
if (search_start == hint_byte) {
+ideal_cache:
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
/*
* we don't want to use the block group if it doesn't match our
* allocation bits, or if it's not cached.
+ *
+ * However if we are re-searching with an ideal block group
+ * picked out then we don't care that the block group is cached.
*/
if (block_group && block_group_bits(block_group, data) &&
- block_group_cache_done(block_group)) {
+ (block_group->cached != BTRFS_CACHE_NO ||
+ search_start == ideal_cache_offset)) {
down_read(&space_info->groups_sem);
if (list_empty(&block_group->list) ||
block_group->ro) {
@@ -4191,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
*/
btrfs_put_block_group(block_group);
up_read(&space_info->groups_sem);
- } else
+ } else {
goto have_block_group;
+ }
} else if (block_group) {
btrfs_put_block_group(block_group);
}
}
-
search:
down_read(&space_info->groups_sem);
list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4209,28 +4217,45 @@ search:
have_block_group:
if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+ u64 free_percent;
+
+ free_percent = btrfs_block_group_used(&block_group->item);
+ free_percent *= 100;
+ free_percent = div64_u64(free_percent,
+ block_group->key.offset);
+ free_percent = 100 - free_percent;
+ if (free_percent > ideal_cache_percent &&
+ likely(!block_group->ro)) {
+ ideal_cache_offset = block_group->key.objectid;
+ ideal_cache_percent = free_percent;
+ }
+
/*
- * we want to start caching kthreads, but not too many
- * right off the bat so we don't overwhelm the system,
- * so only start them if there are less than 2 and we're
- * in the initial allocation phase.
+ * We only want to start kthread caching if we are at
+ * the point where we will wait for caching to make
+ * progress, or if our ideal search is over and we've
+ * found somebody to start caching.
*/
if (loop > LOOP_CACHING_NOWAIT ||
- atomic_read(&space_info->caching_threads) < 2) {
+ (loop > LOOP_FIND_IDEAL &&
+ atomic_read(&space_info->caching_threads) < 2)) {
ret = cache_block_group(block_g
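
The percentage arithmetic in the new LOOP_FIND_IDEAL code above is easier to read condensed: it remembers the emptiest writable block group seen so far as a fallback search target. An equivalent form of the four-statement version in the hunk, using the diff's own names:

	u64 used = btrfs_block_group_used(&block_group->item);
	u64 free_percent = 100 - div64_u64(used * 100, block_group->key.offset);

	/* key.offset is the block group's size, so free_percent is the
	 * percentage of the group not yet allocated */
	if (free_percent > ideal_cache_percent && likely(!block_group->ro)) {
		ideal_cache_offset = block_group->key.objectid;
		ideal_cache_percent = free_percent;
	}

If the normal search runs out of cached groups, a re-search can jump back to the ideal_cache: label near the top of find_free_extent(), where (per the comment there) the cached-only requirement is waived once search_start matches ideal_cache_offset.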