author     J. Bruce Fields <bfields@citi.umich.edu>    2009-10-27 18:45:17 -0400
committer  J. Bruce Fields <bfields@citi.umich.edu>    2009-10-27 18:45:17 -0400
commit     e343eb0d60f74547e0aeb5bd151105c2e6cfe588
tree       92586df0daf3298262a957640e5c86679c963f41
parent     ddc04fd4d5163aee9ebdb38a56c365b602e2b7b7
parent     012abeea669ea49636cf952d13298bb68654146a
Merge commit 'v2.6.32-rc5' into for-2.6.33
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/cache.h | 12
-rw-r--r--  fs/afs/internal.h | 2
-rw-r--r--  fs/anon_inodes.c | 2
-rw-r--r--  fs/bio.c | 49
-rw-r--r--  fs/btrfs/acl.c | 6
-rw-r--r--  fs/btrfs/async-thread.c | 81
-rw-r--r--  fs/btrfs/async-thread.h | 10
-rw-r--r--  fs/btrfs/btrfs_inode.h | 16
-rw-r--r--  fs/btrfs/ctree.h | 38
-rw-r--r--  fs/btrfs/disk-io.c | 60
-rw-r--r--  fs/btrfs/extent-tree.c | 615
-rw-r--r--  fs/btrfs/extent_io.c | 134
-rw-r--r--  fs/btrfs/extent_io.h | 31
-rw-r--r--  fs/btrfs/file.c | 79
-rw-r--r--  fs/btrfs/inode.c | 357
-rw-r--r--  fs/btrfs/ioctl.c | 69
-rw-r--r--  fs/btrfs/ordered-data.c | 99
-rw-r--r--  fs/btrfs/ordered-data.h | 4
-rw-r--r--  fs/btrfs/relocation.c | 4
-rw-r--r--  fs/btrfs/super.c | 9
-rw-r--r--  fs/btrfs/transaction.c | 55
-rw-r--r--  fs/btrfs/transaction.h | 5
-rw-r--r--  fs/btrfs/tree-log.c | 56
-rw-r--r--  fs/btrfs/tree-log.h | 3
-rw-r--r--  fs/btrfs/volumes.c | 4
-rw-r--r--  fs/btrfs/xattr.c | 2
-rw-r--r--  fs/coda/psdev.c | 1
-rw-r--r--  fs/dlm/lowcomms.c | 36
-rw-r--r--  fs/ecryptfs/Kconfig | 3
-rw-r--r--  fs/ecryptfs/main.c | 7
-rw-r--r--  fs/ext3/super.c | 13
-rw-r--r--  fs/ext4/Kconfig | 14
-rw-r--r--  fs/ext4/ext4.h | 54
-rw-r--r--  fs/ext4/ext4_extents.h | 7
-rw-r--r--  fs/ext4/ext4_jbd2.h | 6
-rw-r--r--  fs/ext4/extents.c | 444
-rw-r--r--  fs/ext4/fsync.c | 5
-rw-r--r--  fs/ext4/inode.c | 578
-rw-r--r--  fs/ext4/mballoc.c | 305
-rw-r--r--  fs/ext4/mballoc.h | 35
-rw-r--r--  fs/ext4/migrate.c | 2
-rw-r--r--  fs/ext4/move_extent.c | 20
-rw-r--r--  fs/ext4/namei.c | 3
-rw-r--r--  fs/ext4/super.c | 130
-rw-r--r--  fs/fat/fat.h | 2
-rw-r--r--  fs/fat/inode.c | 18
-rw-r--r--  fs/fat/misc.c | 8
-rw-r--r--  fs/fat/namei_vfat.c | 15
-rw-r--r--  fs/file.c | 1
-rw-r--r--  fs/jbd2/checkpoint.c | 7
-rw-r--r--  fs/jbd2/commit.c | 59
-rw-r--r--  fs/jbd2/journal.c | 198
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/nfs4namespace.c | 12
-rw-r--r--  fs/nfs/nfs4renewd.c | 6
-rw-r--r--  fs/nfs/super.c | 36
-rw-r--r--  fs/nfsd/nfsctl.c | 2
-rw-r--r--  fs/nilfs2/btnode.c | 1
-rw-r--r--  fs/nilfs2/dir.c | 2
-rw-r--r--  fs/nilfs2/file.c | 2
-rw-r--r--  fs/nilfs2/inode.c | 1
-rw-r--r--  fs/nilfs2/mdt.c | 2
-rw-r--r--  fs/nilfs2/nilfs.h | 4
-rw-r--r--  fs/nls/nls_base.c | 8
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/ocfs2/cluster/netdebug.c | 4
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 8
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/omfs/dir.c | 2
-rw-r--r--  fs/omfs/file.c | 2
-rw-r--r--  fs/omfs/omfs.h | 4
-rw-r--r--  fs/proc/kcore.c | 1
-rw-r--r--  fs/proc/page.c | 5
-rw-r--r--  fs/romfs/storage.c | 4
-rw-r--r--  fs/select.c | 1
-rw-r--r--  fs/sysfs/dir.c | 3
-rw-r--r--  fs/sysfs/file.c | 14
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 38
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 41
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 59
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 36
-rw-r--r--  fs/xfs/xfs_dfrag.c | 8
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 4
-rw-r--r--  fs/xfs/xfs_inode.c | 4
-rw-r--r--  fs/xfs/xfs_inode.h | 2
-rw-r--r--  fs/xfs/xfs_inode_item.c | 18
-rw-r--r--  fs/xfs/xfs_itable.c | 21
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 6
90 files changed, 2732 insertions, 1419 deletions
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
deleted file mode 100644
index 5c4f6b499e9..00000000000
--- a/fs/afs/cache.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* AFS local cache management interface
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/fscache.h>
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 106be66dafd..6ece2a13bf7 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -18,10 +18,10 @@
#include <linux/key.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/fscache.h>
#include "afs.h"
#include "afs_vl.h"
-#include "cache.h"
#define AFS_CELL_MAX_ADDRS 15
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index d11c51fc2a3..2ca7a7cafdb 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -8,8 +8,10 @@
*
*/
+#include <linux/cred.h>
#include <linux/file.h>
#include <linux/poll.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
diff --git a/fs/bio.c b/fs/bio.c
index 76738005c8e..402cb84a92a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
mempool_free(p, bs->bio_pool);
}
+EXPORT_SYMBOL(bio_free);
void bio_init(struct bio *bio)
{
@@ -257,6 +258,7 @@ void bio_init(struct bio *bio)
bio->bi_comp_cpu = -1;
atomic_set(&bio->bi_cnt, 1);
}
+EXPORT_SYMBOL(bio_init);
/**
* bio_alloc_bioset - allocate a bio for I/O
@@ -311,6 +313,7 @@ err_free:
mempool_free(p, bs->bio_pool);
return NULL;
}
+EXPORT_SYMBOL(bio_alloc_bioset);
static void bio_fs_destructor(struct bio *bio)
{
@@ -337,6 +340,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
return bio;
}
+EXPORT_SYMBOL(bio_alloc);
static void bio_kmalloc_destructor(struct bio *bio)
{
@@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
return bio;
}
+EXPORT_SYMBOL(bio_kmalloc);
void zero_fill_bio(struct bio *bio)
{
@@ -416,6 +421,7 @@ void bio_put(struct bio *bio)
bio->bi_destructor(bio);
}
}
+EXPORT_SYMBOL(bio_put);
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
@@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
return bio->bi_phys_segments;
}
+EXPORT_SYMBOL(bio_phys_segments);
/**
* __bio_clone - clone a bio
@@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
bio->bi_size = bio_src->bi_size;
bio->bi_idx = bio_src->bi_idx;
}
+EXPORT_SYMBOL(__bio_clone);
/**
* bio_clone - clone a bio
@@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
return b;
}
+EXPORT_SYMBOL(bio_clone);
/**
* bio_get_nr_vecs - return approx number of vecs
@@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
return nr_pages;
}
+EXPORT_SYMBOL(bio_get_nr_vecs);
static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
@@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
return __bio_add_page(q, bio, page, len, offset,
queue_max_hw_sectors(q));
}
+EXPORT_SYMBOL(bio_add_pc_page);
/**
* bio_add_page - attempt to add page to bio
@@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
+EXPORT_SYMBOL(bio_add_page);
struct bio_map_data {
struct bio_vec *iovecs;
@@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio)
bio_put(bio);
return ret;
}
+EXPORT_SYMBOL(bio_uncopy_user);
/**
* bio_copy_user_iov - copy user data to bio
@@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
+EXPORT_SYMBOL(bio_copy_user);
static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
@@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
+EXPORT_SYMBOL(bio_map_user);
/**
* bio_map_user_iov - map user sg_iovec table into bio
@@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio)
__bio_unmap_user(bio);
bio_put(bio);
}
+EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio, int err)
{
bio_put(bio);
}
-
static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
@@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
bio_put(bio);
return ERR_PTR(-EINVAL);
}
+EXPORT_SYMBOL(bio_map_kern);
static void bio_copy_kern_endio(struct bio *bio, int err)
{
@@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
return bio;
}
+EXPORT_SYMBOL(bio_copy_kern);
/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error)
if (bio->bi_end_io)
bio->bi_end_io(bio, error);
}
+EXPORT_SYMBOL(bio_endio);
void bio_pair_release(struct bio_pair *bp)
{
@@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp)
mempool_free(bp, bp->bio2.bi_private);
}
}
+EXPORT_SYMBOL(bio_pair_release);
static void bio_pair_end_1(struct bio *bi, int err)
{
@@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
return bp;
}
+EXPORT_SYMBOL(bio_split);
/**
* bio_sector_offset - Find hardware sector offset in bio
@@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs)
kfree(bs);
}
+EXPORT_SYMBOL(bioset_free);
/**
* bioset_create - Create a bio_set
@@ -1592,6 +1613,7 @@ bad:
bioset_free(bs);
return NULL;
}
+EXPORT_SYMBOL(bioset_create);
static void __init biovec_init_slabs(void)
{
@@ -1636,29 +1658,4 @@ static int __init init_bio(void)
return 0;
}
-
subsys_initcall(init_bio);
-
-EXPORT_SYMBOL(bio_alloc);
-EXPORT_SYMBOL(bio_kmalloc);
-EXPORT_SYMBOL(bio_put);
-EXPORT_SYMBOL(bio_free);
-EXPORT_SYMBOL(bio_endio);
-EXPORT_SYMBOL(bio_init);
-EXPORT_SYMBOL(__bio_clone);
-EXPORT_SYMBOL(bio_clone);
-EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_add_page);
-EXPORT_SYMBOL(bio_add_pc_page);
-EXPORT_SYMBOL(bio_get_nr_vecs);
-EXPORT_SYMBOL(bio_map_user);
-EXPORT_SYMBOL(bio_unmap_user);
-EXPORT_SYMBOL(bio_map_kern);
-EXPORT_SYMBOL(bio_copy_kern);
-EXPORT_SYMBOL(bio_pair_release);
-EXPORT_SYMBOL(bio_split);
-EXPORT_SYMBOL(bio_copy_user);
-EXPORT_SYMBOL(bio_uncopy_user);
-EXPORT_SYMBOL(bioset_create);
-EXPORT_SYMBOL(bioset_free);
-EXPORT_SYMBOL(bio_alloc_bioset);
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index f128427b995..36160424427 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
#include "btrfs_inode.h"
#include "xattr.h"
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
.set = btrfs_xattr_acl_access_set,
};
-#else /* CONFIG_FS_POSIX_ACL */
+#else /* CONFIG_BTRFS_FS_POSIX_ACL */
int btrfs_acl_chmod(struct inode *inode)
{
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
return 0;
}
-#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 282ca085c2f..c0861e781cd 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,51 @@ struct btrfs_worker_thread {
};
/*
+ * btrfs_start_workers uses kthread_run, which can block waiting for memory
+ * for a very long time. It will actually throttle on page writeback,
+ * and so it may not make progress until after our btrfs worker threads
+ * process all of the pending work structs in their queue.
+ *
+ * This means we can't use btrfs_start_workers from inside a btrfs worker
+ * thread that is used as part of cleaning dirty memory, which pretty much
+ * involves all of the worker threads.
+ *
+ * Instead we have a helper queue that never has more than one thread,
+ * where we schedule thread start operations. This worker_start struct
+ * is used to contain the work and hold a pointer to the queue that needs
+ * another worker.
+ */
+struct worker_start {
+ struct btrfs_work work;
+ struct btrfs_workers *queue;
+};
+
+static void start_new_worker_func(struct btrfs_work *work)
+{
+ struct worker_start *start;
+ start = container_of(work, struct worker_start, work);
+ btrfs_start_workers(start->queue, 1);
+ kfree(start);
+}
+
+static int start_new_worker(struct btrfs_workers *queue)
+{
+ struct worker_start *start;
+ int ret;
+
+ start = kzalloc(sizeof(*start), GFP_NOFS);
+ if (!start)
+ return -ENOMEM;
+
+ start->work.func = start_new_worker_func;
+ start->queue = queue;
+ ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
+ if (ret)
+ kfree(start);
+ return ret;
+}
+
+/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
*/
@@ -118,11 +163,13 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
goto out;
workers->atomic_start_pending = 0;
- if (workers->num_workers >= workers->max_workers)
+ if (workers->num_workers + workers->num_workers_starting >=
+ workers->max_workers)
goto out;
+ workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
- btrfs_start_workers(workers, 1);
+ start_new_worker(workers);
return;
out:
@@ -390,9 +437,11 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
/*
* simple init on struct btrfs_workers
*/
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+ struct btrfs_workers *async_helper)
{
workers->num_workers = 0;
+ workers->num_workers_starting = 0;
INIT_LIST_HEAD(&workers->worker_list);
INIT_LIST_HEAD(&workers->idle_list);
INIT_LIST_HEAD(&workers->order_list);
@@ -404,14 +453,15 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
workers->name = name;
workers->ordered = 0;
workers->atomic_start_pending = 0;
- workers->atomic_worker_start = 0;
+ workers->atomic_worker_start = async_helper;
}
/*
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers,
+ int num_workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
@@ -444,6 +494,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
workers->num_workers++;
+ workers->num_workers_starting--;
+ WARN_ON(workers->num_workers_starting < 0);
spin_unlock_irq(&workers->lock);
}
return 0;
@@ -452,6 +504,14 @@ fail:
return ret;
}
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+{
+ spin_lock_irq(&workers->lock);
+ workers->num_workers_starting += num_workers;
+ spin_unlock_irq(&workers->lock);
+ return __btrfs_start_workers(workers, num_workers);
+}
+
/*
* run through the list and find a worker thread that doesn't have a lot
* to do right now. This can return null if we aren't yet at the thread