From c37650161a53c01ddd88587675f9a4adc909a73e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 6 Oct 2010 10:48:20 +0200
Subject: fs: add sync_inode_metadata

Add a new helper to write out the inode using the writeback code,
that is, including the correct dirty bit and list manipulation.  A few
filesystems already opencode this, and a lot of others should be using
it instead of write_inode_now, which also writes out the data.

Signed-off-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 fs/fs-writeback.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'fs/fs-writeback.c')

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ab38fef1c9a..29e3f409bbd 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1198,3 +1198,23 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
 	return ret;
 }
 EXPORT_SYMBOL(sync_inode);
+
+/**
+ * sync_inode_metadata - write an inode to disk
+ * @inode: the inode to sync
+ * @wait: wait for I/O to complete.
+ *
+ * Write an inode to disk and adjust its dirty state after completion.
+ *
+ * Note: only writes the actual inode, no associated data or other metadata.
+ */
+int sync_inode_metadata(struct inode *inode, int wait)
+{
+	struct writeback_control wbc = {
+		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+		.nr_to_write = 0, /* metadata-only */
+	};
+
+	return sync_inode(inode, &wbc);
+}
+EXPORT_SYMBOL(sync_inode_metadata);
--
cgit v1.2.3-18-g5258
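The helper added above lets a filesystem push just the inode, not its data.
For illustration only, and not part of the patch: a filesystem that today
calls write_inode_now() purely to get the inode itself on disk could switch
roughly as sketched below; example_write_inode_only() is an assumed,
hypothetical caller, not a kernel function.

	#include <linux/fs.h>

	/* Hypothetical caller: write only the inode, not its data pages. */
	static int example_write_inode_only(struct inode *inode, int wait)
	{
		/*
		 * write_inode_now(inode, 1) would also flush the inode's
		 * dirty pages; sync_inode_metadata() goes through the same
		 * writeback path but writes just the inode and then fixes
		 * up its dirty bit and list state.
		 */
		return sync_inode_metadata(inode, wait);
	}
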
From 1d3382cbf02986e4833849f528d451367ea0b4cb Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 23 Oct 2010 15:19:20 -0400
Subject: new helper: inode_unhashed()

note: for race-free uses you need inode_lock held

Signed-off-by: Al Viro
---
 fs/fs-writeback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs/fs-writeback.c')

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 29e3f409bbd..39f44f2e709 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -962,7 +962,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 * dirty list.  Add blockdev inodes as well.
 		 */
 		if (!S_ISBLK(inode->i_mode)) {
-			if (hlist_unhashed(&inode->i_hash))
+			if (inode_unhashed(inode))
 				goto out;
 		}
 		if (inode->i_state & I_FREEING)
--
cgit v1.2.3-18-g5258

From cffbc8aa334f55c9ed42d25202eb3ebf3a97c195 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Sat, 23 Oct 2010 05:03:02 -0400
Subject: fs: Convert nr_inodes and nr_unused to per-cpu counters

The number of inodes allocated does not need to be tied to the addition
or removal of an inode to/from a list.  If we are not tied to a list
lock, we could update the counters when inodes are initialised or
destroyed, but to do that we need to convert the counters to be per-cpu
(i.e. independent of a lock).  This means that we have the freedom to
change the list/locking implementation without needing to care about
the counters.

Based on a patch originally from Eric Dumazet.

[AV: cleaned up a bit, fixed build breakage on weird configs]

Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 fs/fs-writeback.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'fs/fs-writeback.c')

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 39f44f2e709..f04d04af84f 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -723,7 +723,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 	wb->last_old_flush = jiffies;
 	nr_pages = global_page_state(NR_FILE_DIRTY) +
 			global_page_state(NR_UNSTABLE_NFS) +
-			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
+			get_nr_dirty_inodes();
 
 	if (nr_pages) {
 		struct wb_writeback_work work = {
@@ -1090,8 +1090,7 @@ void writeback_inodes_sb(struct super_block *sb)
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	work.nr_pages = nr_dirty + nr_unstable +
-			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
+	work.nr_pages = nr_dirty + nr_unstable + get_nr_dirty_inodes();
 
 	bdi_queue_work(sb->s_bdi, &work);
 	wait_for_completion(&done);
--
cgit v1.2.3-18-g5258
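get_nr_dirty_inodes(), called in the two hunks above, is defined in
fs/inode.c and is outside this fs/fs-writeback.c excerpt.  The snippet
below is only a sketch of the per-cpu counter shape the commit message
describes; the names and exact definitions in fs/inode.c may differ.

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	/* One counter slot per CPU, so updates take no shared lock. */
	static DEFINE_PER_CPU(unsigned int, nr_inodes);
	static DEFINE_PER_CPU(unsigned int, nr_unused);

	static int get_nr_inodes(void)
	{
		int i, sum = 0;

		/* Reads sum the per-cpu slots and are only approximate. */
		for_each_possible_cpu(i)
			sum += per_cpu(nr_inodes, i);
		return sum < 0 ? 0 : sum;
	}

	static int get_nr_inodes_unused(void)
	{
		int i, sum = 0;

		for_each_possible_cpu(i)
			sum += per_cpu(nr_unused, i);
		return sum < 0 ? 0 : sum;
	}

	static int get_nr_dirty_inodes(void)
	{
		/* Writeback only needs a rough "allocated minus unused" figure. */
		int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();

		return nr_dirty > 0 ? nr_dirty : 0;
	}
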
From 9e38d86ff2d8a8db99570e982230861046df32b5 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sat, 23 Oct 2010 06:55:17 -0400
Subject: fs: Implement lazy LRU updates for inodes

Convert the inode LRU to use lazy updates to reduce lock and cacheline
traffic.  We avoid moving inodes around in the LRU list during iget/iput
operations so these frequent operations don't need to access the LRUs.
Instead, we defer the refcount checks to reclaim-time and use a
per-inode state flag, I_REFERENCED, to tell reclaim that iget has
touched the inode in the past.  This means that only reclaim should be
touching the LRU with any frequency, hence significantly reducing lock
acquisitions and the amount of contention on LRU updates.

This also removes the inode_in_use list, which means we now only have
one list for tracking the inode LRU status.  This makes it much simpler
to split out the LRU list operations under its own lock.

Signed-off-by: Nick Piggin
Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 fs/fs-writeback.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'fs/fs-writeback.c')

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f04d04af84f..e8f65290e83 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -408,16 +408,13 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 			 * completion.
 			 */
 			redirty_tail(inode);
-		} else if (atomic_read(&inode->i_count)) {
-			/*
-			 * The inode is clean, inuse
-			 */
-			list_move(&inode->i_list, &inode_in_use);
 		} else {
 			/*
-			 * The inode is clean, unused
+			 * The inode is clean.  At this point we either have
+			 * a reference to the inode or it's on its way out.
+			 * No need to add it back to the LRU.
 			 */
-			list_move(&inode->i_list, &inode_unused);
+			list_del_init(&inode->i_list);
 		}
 	}
 	inode_sync_complete(inode);
--
cgit v1.2.3-18-g5258

From 7ccf19a8042e343f8159f8a5fdd6a9422aa90c78 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Thu, 21 Oct 2010 11:49:30 +1100
Subject: fs: inode split IO and LRU lists

The use of the same inode list structure (inode->i_list) for two
different list constructs with different lifecycles and purposes makes
it impossible to separate the locking of the different operations.
Therefore, to enable the separation of the locking of the writeback and
reclaim lists, split the inode->i_list into two separate lists dedicated
to their specific tracking functions.

Signed-off-by: Nick Piggin
Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 fs/fs-writeback.c | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)

(limited to 'fs/fs-writeback.c')

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e8f65290e83..7a24cc957f0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -79,6 +79,11 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
 	return sb->s_bdi;
 }
 
+static inline struct inode *wb_inode(struct list_head *head)
+{
+	return list_entry(head, struct inode, i_wb_list);
+}
+
 static void bdi_queue_work(struct backing_dev_info *bdi,
 		struct wb_writeback_work *work)
 {
@@ -172,11 +177,11 @@ static void redirty_tail(struct inode *inode)
 	if (!list_empty(&wb->b_dirty)) {
 		struct inode *tail;
 
-		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
+		tail = wb_inode(wb->b_dirty.next);
 		if (time_before(inode->dirtied_when, tail->dirtied_when))
 			inode->dirtied_when = jiffies;
 	}
-	list_move(&inode->i_list, &wb->b_dirty);
+	list_move(&inode->i_wb_list, &wb->b_dirty);
 }
 
 /*
@@ -186,7 +191,7 @@ static void requeue_io(struct inode *inode)
 {
 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 
-	list_move(&inode->i_list, &wb->b_more_io);
+	list_move(&inode->i_wb_list, &wb->b_more_io);
 }
 
 static void inode_sync_complete(struct inode *inode)
@@ -227,14 +232,14 @@ static void move_expired_inodes(struct list_head *delaying_queue,
 	int do_sb_sort = 0;
 
 	while (!list_empty(delaying_queue)) {
-		inode = list_entry(delaying_queue->prev, struct inode, i_list);
+		inode = wb_inode(delaying_queue->prev);
 		if (older_than_this &&
 		    inode_dirtied_after(inode, *older_than_this))
 			break;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
 		sb = inode->i_sb;
-		list_move(&inode->i_list, &tmp);
+		list_move(&inode->i_wb_list, &tmp);
 	}
 
 	/* just one sb in list, splice to dispatch_queue and we're done */
@@ -245,12 +250,11 @@ static void move_expired_inodes(struct list_head *delaying_queue,
 
 	/* Move inodes from one superblock together */
 	while (!list_empty(&tmp)) {
-		inode = list_entry(tmp.prev, struct inode, i_list);
-		sb = inode->i_sb;
+		sb = wb_inode(tmp.prev)->i_sb;
 		list_for_each_prev_safe(pos, node, &tmp) {
-			inode = list_entry(pos, struct inode, i_list);
+			inode = wb_inode(pos);
 			if (inode->i_sb == sb)
-				list_move(&inode->i_list, dispatch_queue);
+				list_move(&inode->i_wb_list, dispatch_queue);
 		}
 	}
 }
@@ -414,7 +418,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 			 * a reference to the inode or it's on its way out.
 			 * No need to add it back to the LRU.
 			 */
-			list_del_init(&inode->i_list);
+			list_del_init(&inode->i_wb_list);
 		}
 	}
 	inode_sync_complete(inode);
@@ -462,8 +466,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 {
 	while (!list_empty(&wb->b_io)) {
 		long pages_skipped;
-		struct inode *inode = list_entry(wb->b_io.prev,
-						struct inode, i_list);
+		struct inode *inode = wb_inode(wb->b_io.prev);
 
 		if (inode->i_sb != sb) {
 			if (only_this_sb) {
@@ -533,8 +536,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
 		queue_io(wb, wbc->older_than_this);
 
 	while (!list_empty(&wb->b_io)) {
-		struct inode *inode = list_entry(wb->b_io.prev,
-						struct inode, i_list);
+		struct inode *inode = wb_inode(wb->b_io.prev);
 		struct super_block *sb = inode->i_sb;
 
 		if (!pin_sb_for_writeback(sb)) {
@@ -672,8 +674,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 */
 		spin_lock(&inode_lock);
 		if (!list_empty(&wb->b_more_io))  {
-			inode = list_entry(wb->b_more_io.prev,
-						struct inode, i_list);
+			inode = wb_inode(wb->b_more_io.prev);
 			trace_wbc_writeback_wait(&wbc, wb->bdi);
 			inode_wait_for_writeback(inode);
 		}
@@ -987,7 +988,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			}
 
 			inode->dirtied_when = jiffies;
-			list_move(&inode->i_list, &bdi->wb.b_dirty);
+			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
 		}
 	}
 out:
--
cgit v1.2.3-18-g5258
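The reclaim side of the lazy LRU scheme described in the two commit
messages above lives in fs/inode.c and is not part of these
fs/fs-writeback.c diffs.  The fragment below is only a rough sketch of
the idea, namely that iget()/iput() no longer touch the LRU and reclaim
checks the reference count and the I_REFERENCED flag when it scans the
list; it does not reproduce the actual prune_icache() code, and the
list and lock names are approximate.

	/* Sketch of the LRU scan loop body, assumed to run under inode_lock. */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		/* Referenced again or otherwise busy: lazily drop it from the LRU. */
		list_del_init(&inode->i_lru);
		continue;
	}
	if (inode->i_state & I_REFERENCED) {
		/* iget() touched it since the last scan: give it one more pass. */
		inode->i_state &= ~I_REFERENCED;
		list_move(&inode->i_lru, &inode_lru);
		continue;
	}
	/* Otherwise the inode really is unused and can be evicted. */
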
From 9843b76aae80293f5b5a0e275360627508595ce5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sun, 24 Oct 2010 19:40:46 +0200
Subject: fs: skip I_FREEING inodes in writeback_sb_inodes

Skip I_FREEING inodes just like I_WILL_FREE and I_NEW when walking the
writeback lists.  Currently this can't happen, but once we move from
inode_lock to more fine-grained locking we can have an inode that's
still on the writeback lists but has I_FREEING set, and we absolutely
need to skip it here, just like we do for all other inode list walks.

Based on a patch from Dave Chinner.

Signed-off-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 fs/fs-writeback.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'fs/fs-writeback.c')

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 7a24cc957f0..f6af81add45 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -487,10 +487,16 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 			return 0;
 		}
 
-		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
+		/*
+		 * Don't bother with new inodes or inodes being freed; the
+		 * first kind does not need periodic writeout yet, and for
+		 * the latter kind writeout is handled by the freer.
+		 */
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 			requeue_io(inode);
 			continue;
 		}
+
 		/*
 		 * Was this inode dirtied after sync_sb_inodes was called?
 		 * This keeps sync from extra jobs and livelock.
@@ -498,7 +504,6 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 		if (inode_dirtied_after(inode, wbc->wb_start))
 			return 1;
 
-		BUG_ON(inode->i_state & I_FREEING);
 		__iget(inode);
 		pages_skipped = wbc->pages_skipped;
 		writeback_single_inode(inode, wbc);
--
cgit v1.2.3-18-g5258