Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--  fs/fs-writeback.c  255
1 file changed, 143 insertions(+), 112 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index d5be1693ac9..7d9d06ba184 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -26,15 +26,9 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
+#include <linux/tracepoint.h>
#include "internal.h"
-#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-
-/*
- * We don't actually have pdflush, but this one is exported through /proc...
- */
-int nr_pdflush_threads;
-
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
@@ -50,6 +44,21 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
+/*
+ * Include the creation of the trace points after defining the
+ * wb_writeback_work structure so that the definition remains local to this
+ * file.
+ */
+#define CREATE_TRACE_POINTS
+#include <trace/events/writeback.h>
+
+#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
+
+/*
+ * We don't actually have pdflush, but this one is exported through /proc...
+ */
+int nr_pdflush_threads;
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
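The CREATE_TRACE_POINTS hunk above follows the standard tracepoint idiom: the events in <trace/events/writeback.h> reference struct wb_writeback_work, so the instantiation has to come after the structure definition, and exactly one translation unit may define CREATE_TRACE_POINTS before including the header. A minimal sketch of the idiom with hypothetical names (a "foo" subsystem, not the actual writeback events):

    /* include/trace/events/foo.h (hypothetical) */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM foo

    #if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_FOO_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(foo_event,
            TP_PROTO(int value),
            TP_ARGS(value),
            TP_STRUCT__entry(__field(int, value)),
            TP_fast_assign(__entry->value = value;),
            TP_printk("value=%d", __entry->value)
    );

    #endif /* _TRACE_FOO_H */

    /* this part must stay outside the multi-read protection */
    #include <trace/define_trace.h>

    /* foo.c: exactly one .c file expands the events into real code */
    #define CREATE_TRACE_POINTS
    #include <trace/events/foo.h>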
@@ -59,28 +68,27 @@ struct wb_writeback_work {
*/
int writeback_in_progress(struct backing_dev_info *bdi)
{
- return !list_empty(&bdi->work_list);
+ return test_bit(BDI_writeback_running, &bdi->state);
}
static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
{
- spin_lock(&bdi->wb_lock);
- list_add_tail(&work->list, &bdi->work_list);
- spin_unlock(&bdi->wb_lock);
+ trace_writeback_queue(bdi, work);
- /*
- * If the default thread isn't there, make sure we add it. When
- * it gets created and wakes up, we'll run this work.
- */
- if (unlikely(list_empty_careful(&bdi->wb_list)))
+ spin_lock_bh(&bdi->wb_lock);
+ list_add_tail(&work->list, &bdi->work_list);
+ if (bdi->wb.task) {
+ wake_up_process(bdi->wb.task);
+ } else {
+ /*
+ * The bdi thread isn't there, wake up the forker thread which
+ * will create and run it.
+ */
+ trace_writeback_nothread(bdi, work);
wake_up_process(default_backing_dev_info.wb.task);
- else {
- struct bdi_writeback *wb = &bdi->wb;
-
- if (wb->task)
- wake_up_process(wb->task);
}
+ spin_unlock_bh(&bdi->wb_lock);
}
static void
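The reworked bdi_queue_work() makes the list append and the wakeup decision atomic with respect to each other: both happen under wb_lock, so an exiting bdi thread cannot be chosen as the wakeup target after it is gone. A simplified userspace model of that shape, using pthreads and illustrative names rather than kernel primitives:

    #include <pthread.h>
    #include <stdbool.h>

    struct work { struct work *next; };

    struct bdi_model {
            pthread_mutex_t lock;           /* models bdi->wb_lock */
            pthread_cond_t wakeup;          /* models the bdi thread's sleep */
            pthread_cond_t forker_wakeup;   /* models the default bdi thread */
            struct work *head, **tail;      /* init: head = NULL, tail = &head */
            bool have_thread;               /* models bdi->wb.task != NULL */
    };

    void queue_work_model(struct bdi_model *bdi, struct work *w)
    {
            pthread_mutex_lock(&bdi->lock);
            w->next = NULL;
            *bdi->tail = w;                 /* list_add_tail() */
            bdi->tail = &w->next;
            if (bdi->have_thread)
                    pthread_cond_signal(&bdi->wakeup);
            else                            /* no thread: ask the forker */
                    pthread_cond_signal(&bdi->forker_wakeup);
            pthread_mutex_unlock(&bdi->lock);
    }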
@@ -95,8 +103,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
*/
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
- if (bdi->wb.task)
+ if (bdi->wb.task) {
+ trace_writeback_nowork(bdi);
wake_up_process(bdi->wb.task);
+ }
return;
}
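The error path above degrades gracefully: this is WB_SYNC_NONE writeback, so if the atomic allocation fails, simply waking the flusher thread still makes progress via its periodic kupdate-style pass. The same shape in isolation (plain C, hypothetical stand-ins for the wakeup and queueing calls):

    #include <stdlib.h>

    struct wb_work_model { long nr_pages; };

    /* hypothetical stand-ins for wake_up_process() and bdi_queue_work() */
    static void wake_flusher(void) { }
    static void enqueue_work(struct wb_work_model *w) { (void)w; }

    void start_writeback_model(long nr_pages)
    {
            /* kzalloc(..., GFP_ATOMIC) in the kernel: may fail, and
             * sleeping for memory is not allowed in this context */
            struct wb_work_model *w = calloc(1, sizeof(*w));

            if (!w) {
                    wake_flusher(); /* bare wakeup: periodic pass will run */
                    return;
            }
            w->nr_pages = nr_pages;
            enqueue_work(w);
    }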
@@ -239,10 +249,18 @@ static void move_expired_inodes(struct list_head *delaying_queue,
/*
* Queue all expired dirty inodes for io, eldest first.
+ * Before
+ *         newly dirtied     b_dirty    b_io    b_more_io
+ *         =============>    gf         edc     BA
+ * After
+ *         newly dirtied     b_dirty    b_io    b_more_io
+ *         =============>    g          fBAedc
+ *                                           |
+ *                                           +--> dequeue for IO
*/
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
- list_splice_init(&wb->b_more_io, wb->b_io.prev);
+ list_splice_init(&wb->b_more_io, &wb->b_io);
move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
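The one-line change is what produces the ordering in the diagram: splicing b_more_io to the head of b_io (instead of its tail) and then letting move_expired_inodes() prepend the expired part of b_dirty yields fBAedc. A standalone toy that just reproduces the ordering (labels as in the diagram, a..g dirtied in that order):

    #include <stdio.h>

    int main(void)
    {
            const char *b_dirty   = "gf";  /* g too recently dirtied to expire */
            const char *b_more_io = "BA";
            const char *b_io      = "edc"; /* leftover from the previous pass */
            char after[16];

            /* list_splice_init(&wb->b_more_io, &wb->b_io):
             * b_more_io entries land at the head of b_io */
            snprintf(after, sizeof(after), "%s%s", b_more_io, b_io);

            /* move_expired_inodes() then prepends the expired part
             * of b_dirty (f, but not g) */
            printf("b_io after queue_io(): %c%s\n", b_dirty[1], after);
            return 0;   /* prints fBAedc */
    }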
@@ -352,63 +370,36 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
spin_lock(&inode_lock);
inode->i_state &= ~I_SYNC;
- if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
- if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
- /*
- * More pages get dirtied by a fast dirtier.
- */
- goto select_queue;
- } else if (inode->i_state & I_DIRTY) {
- /*
- * At least XFS will redirty the inode during the
- * writeback (delalloc) and on io completion (isize).
- */
- redirty_tail(inode);
- } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ if (!(inode->i_state & I_FREEING)) {
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
/*
* We didn't write back all the pages. nfs_writepages()
- * sometimes bales out without doing anything. Redirty
- * the inode; Move it from b_io onto b_more_io/b_dirty.
- */
- /*
- * akpm: if the caller was the kupdate function we put
- * this inode at the head of b_dirty so it gets first
- * consideration. Otherwise, move it to the tail, for
- * the reasons described there. I'm not really sure
- * how much sense this makes. Presumably I had a good
- * reasons for doing it this way, and I'd rather not
- * muck with it at present.
+ * sometimes bales out without doing anything.
*/
- if (wbc->for_kupdate) {
+ inode->i_state |= I_DIRTY_PAGES;
+ if (wbc->nr_to_write <= 0) {
/*
- * For the kupdate function we move the inode
- * to b_more_io so it will get more writeout as
- * soon as the queue becomes uncongested.
+ * slice used up: queue for next turn
*/
- inode->i_state |= I_DIRTY_PAGES;
-select_queue:
- if (wbc->nr_to_write <= 0) {
- /*
- * slice used up: queue for next turn
- */
- requeue_io(inode);
- } else {
- /*
- * somehow blocked: retry later
- */
- redirty_tail(inode);
- }
+ requeue_io(inode);
} else {
/*
- * Otherwise fully redirty the inode so that
- * other inodes on this superblock will get some
- * writeout. Otherwise heavy writing to one
- * file would indefinitely suspend writeout of
- * all the other files.
+ * Writeback blocked by something other than
+ * congestion. Delay the inode for some time to
+ * avoid spinning on the CPU (100% iowait)
+ * retrying writeback of the dirty page/inode
+ * that cannot be performed immediately.
*/
- inode->i_state |= I_DIRTY_PAGES;
redirty_tail(inode);
}
+ } else if (inode->i_state & I_DIRTY) {
+ /*
+ * Filesystems can dirty the inode during writeback
+ * operations, such as delayed allocation during
+ * submission or metadata updates after data IO
+ * completion.
+ */
+ redirty_tail(inode);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
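The restructured branches read naturally as a decision procedure: dirty pages first (requeue or back off depending on why writeback stopped), then inode metadata, then the clean cases. A condensed restatement with illustrative names; it deliberately omits the clean-inode list placement that follows in the original:

    #include <stdbool.h>

    enum requeue_action {
            REQUEUE_IO,     /* b_more_io: try again next pass */
            REDIRTY_TAIL,   /* b_dirty: retry after a delay */
            LEAVE_CLEAN,    /* nothing left to write */
    };

    enum requeue_action requeue_policy(bool pages_still_dirty,
                                       bool inode_meta_dirty,
                                       long nr_to_write)
    {
            if (pages_still_dirty) {
                    /* stopped only because the slice was used up? */
                    if (nr_to_write <= 0)
                            return REQUEUE_IO;
                    /* blocked for another reason: back off rather
                     * than spin at 100% iowait on this inode */
                    return REDIRTY_TAIL;
            }
            if (inode_meta_dirty)
                    /* redirtied during writeback, e.g. delalloc or
                     * an i_size update on IO completion */
                    return REDIRTY_TAIL;
            return LEAVE_CLEAN;
    }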
@@ -499,7 +490,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
if (inode_dirtied_after(inode, wbc->wb_start))
return 1;
- BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
+ BUG_ON(inode->i_state & I_FREEING);
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
@@ -530,7 +521,8 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
{
int ret = 0;
- wbc->wb_start = jiffies; /* livelock avoidance */
+ if (!wbc->wb_start)
+ wbc->wb_start = jiffies; /* livelock avoidance */
spin_lock(&inode_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
@@ -559,7 +551,6 @@ static void __writeback_inodes_sb(struct super_block *sb,
{
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- wbc->wb_start = jiffies; /* livelock avoidance */
spin_lock(&inode_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
@@ -580,7 +571,7 @@ static inline bool over_bground_thresh(void)
{
unsigned long background_thresh, dirty_thresh;
- get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+ global_dirty_limits(&background_thresh, &dirty_thresh);
return (global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
@@ -625,6 +616,7 @@ static long wb_writeback(struct bdi_writeback *wb,
wbc.range_end = LLONG_MAX;
}
+ wbc.wb_start = jiffies; /* livelock avoidance */
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
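Moving the wb_start assignment here means one cutoff timestamp covers the whole wb_writeback() pass; the "if (!wbc->wb_start)" guard added to writeback_inodes_wb() earlier in this patch preserves it across iterations. The livelock-avoidance idea itself is just a timestamp comparison, sketched here in userspace C with illustrative types:

    #include <stdbool.h>
    #include <time.h>

    struct inode_model { time_t dirtied_when; };

    /* mirrors inode_dirtied_after(inode, wbc->wb_start): an inode
     * dirtied after the pass began is left for a later pass, so a
     * process that keeps redirtying pages cannot pin us forever */
    bool skip_for_livelock(const struct inode_model *inode, time_t wb_start)
    {
            return inode->dirtied_when > wb_start;
    }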
@@ -642,10 +634,14 @@ static long wb_writeback(struct bdi_writeback *wb,
wbc.more_io = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
+
+ trace_wbc_writeback_start(&wbc, wb->bdi);
if (work->sb)
__writeback_inodes_sb(work->sb, wb, &wbc);
else
writeback_inodes_wb(wb, &wbc);
+ trace_wbc_writeback_written(&wbc, wb->bdi);
+
work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
@@ -673,6 +669,7 @@ static long wb_writeback(struct bdi_writeback *wb,
if (!list_empty(&wb->b_more_io)) {
inode = list_entry(wb->b_more_io.prev,
struct inode, i_list);
+ trace_wbc_writeback_wait(&wbc, wb->bdi);
inode_wait_for_writeback(inode);
}
spin_unlock(&inode_lock);
@@ -685,17 +682,17 @@ static long wb_writeback(struct bdi_writeback *wb,
* Return the next wb_writeback_work struct that hasn't been processed yet.
*/
static struct wb_writeback_work *
-get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
+get_next_work_item(struct backing_dev_info *bdi)
{
struct wb_writeback_work *work = NULL;
- spin_lock(&bdi->wb_lock);
+ spin_lock_bh(&bdi->wb_lock);
if (!list_empty(&bdi->work_list)) {
work = list_entry(bdi->work_list.next,
struct wb_writeback_work, list);
list_del_init(&work->list);
}
- spin_unlock(&bdi->wb_lock);
+ spin_unlock_bh(&bdi->wb_lock);
return work;
}
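get_next_work_item() is a plain pop-front under wb_lock; the switch to the _bh variant is presumably needed because, with the wakeup timer introduced in this series, wb_lock is also taken from a timer callback in softirq context, and a process-context holder must disable bottom halves to avoid deadlocking against it. The pop-front shape in userspace C (illustrative names):

    #include <pthread.h>
    #include <stddef.h>

    struct work_item { struct work_item *next; };

    struct work_queue {
            pthread_mutex_t lock;   /* models bdi->wb_lock */
            struct work_item *head;
    };

    struct work_item *pop_work(struct work_queue *q)
    {
            struct work_item *w;

            pthread_mutex_lock(&q->lock);
            w = q->head;
            if (w)
                    q->head = w->next;  /* list_del_init() */
            pthread_mutex_unlock(&q->lock);
            return w;                   /* NULL when the list is empty */
    }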
@@ -743,7 +740,8 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
struct wb_writeback_work *work;
long wrote = 0;
- while ((work = get_next_work_item(bdi, wb)) != NULL) {
+ set_bit(BDI_writeback_running, &wb->bdi->state);
+ while ((work = get_next_work_item(bdi)) != NULL) {
/*
* Override sync mode, in case we must wait for completion
* because this thread is exiting now.
@@ -751,6 +749,8 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
if (force_wait)
work->sync_mode = WB_SYNC_ALL;
+ trace_writeback_exec(bdi, work);
+
wrote += wb_writeback(wb, work);
/*
@@ -767,6 +767,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
* Check for periodic writeback, kupdated() style
*/
wrote += wb_check_old_data_flush(wb);
+ clear_bit(BDI_writeback_running, &wb->bdi->state);
return wrote;
}
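With the set_bit/clear_bit bracket, writeback_in_progress() now reports "the flusher is currently executing work" rather than "work is queued". The flag bracket in isolation, modeled with C11 atomics and illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool writeback_running;   /* models BDI_writeback_running */

    bool writeback_in_progress_model(void)
    {
            return atomic_load(&writeback_running);
    }

    long do_writeback_model(void)
    {
            long wrote = 0;

            atomic_store(&writeback_running, true);
            /* ... drain the work list, then the periodic pass ... */
            atomic_store(&writeback_running, false);
            return wrote;
    }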
@@ -775,47 +776,66 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
* Handle writeback of dirty data for the device backed by this bdi. Also
* wakes up periodically and does kupdated style flushing.
*/
-int bdi_writeback_task(struct bdi_writeback *wb)
+int bdi_writeback_thread(void *data)
{
- unsigned long last_active = jiffies;
- unsigned long wait_jiffies = -1UL;
+ struct bdi_writeback *wb = data;
+ struct backing_dev_info *bdi = wb->bdi;
long pages_written;
+ current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ set_freezable();
+ wb->last_active = jiffies;
+
+ /*
+ * Our parent may run at a different priority, just set us to normal
+ */
+ set_user_nice(current, 0);
+
+ trace_writeback_thread_start(bdi);
+
while (!kthread_should_stop()) {
+ /*
+ * Remove own delayed wake-up timer, since we are already awake
+ * and we'll take care of the periodic write-back.
+ */
+ del_timer(&wb->wakeup_timer);
+
pages_written = wb_do_writeback(wb, 0);
+ trace_writeback_pages_written(pages_written);
+
if (pages_written)
- last_active = jiffies;
- else if (wait_jiffies != -1UL) {
- unsigned long max_idle;
+ wb->last_active = jiffies;
- /*
- * Longest period of inactivity that we tolerate. If we
- * see dirty data again later, the task will get
- * recreated automatically.
- */
- max_idle = max(5UL * 60 * HZ, wait_jiffies);
- if (time_after(jiffies, max_idle + last_active))
- break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!list_empty(&bdi->work_list)) {
+ __set_current_state(TASK_RUNNING);
+ continue;
}
- if (dirty_writeback_interval) {
- wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
- schedule_timeout_interruptible(wait_jiffies);
- } else {
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty_careful(&wb->bdi->work_list) &&
- !kthread_should_stop())
- schedule();
- __set_current_state(TASK_RUNNING);
+ if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+ schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
+ else {
+ /*
+ * We have nothing to do, so we can sleep without any
+ * timeout and save power. When work is queued or
+ * something is made dirty, we will be woken up.
+ */
+ schedule();
}
try_to_freeze();
}
+ /* Flush any work that raced with us exiting */
+ if (!list_empty(&bdi->work_list))
+ wb_do_writeback(wb, 1);
+
+ trace_writeback_thread_stop(bdi);
return 0;
}
+
/*
* Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
* the whole world.
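The sleep logic at the bottom of the loop has three cases: pending work (go around again), dirty data with a configured interval (bounded sleep until the next periodic pass), or nothing at all (sleep indefinitely to save power). The kernel closes the check-then-sleep race by setting TASK_INTERRUPTIBLE before testing the work list; the userspace model below gets the same effect by holding a mutex across the check and the wait. Pthreads, illustrative names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    struct flusher {
            pthread_mutex_t lock;
            pthread_cond_t wake;
            bool work_pending;      /* models !list_empty(&bdi->work_list) */
            bool dirty_io;          /* models wb_has_dirty_io(wb) */
            long interval_sec;      /* models dirty_writeback_interval */
    };

    void flusher_wait(struct flusher *f)
    {
            pthread_mutex_lock(&f->lock);
            while (!f->work_pending) {
                    if (f->dirty_io && f->interval_sec) {
                            /* bounded sleep: time the periodic pass */
                            struct timespec ts;
                            clock_gettime(CLOCK_REALTIME, &ts);
                            ts.tv_sec += f->interval_sec;
                            if (pthread_cond_timedwait(&f->wake, &f->lock,
                                                       &ts) == ETIMEDOUT)
                                    break;  /* run a periodic pass */
                    } else {
                            /* nothing dirty: sleep without a timeout
                             * until work is queued or data is dirtied */
                            pthread_cond_wait(&f->wake, &f->lock);
                    }
            }
            pthread_mutex_unlock(&f->lock);
    }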
@@ -890,6 +910,8 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
+ struct backing_dev_info *bdi = NULL;
+ bool wakeup_bdi = false;
/*
* Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -935,7 +957,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (hlist_unhashed(&inode->i_hash))
goto out;
}
- if (inode->i_state & (I_FREEING|I_CLEAR))
+ if (inode->i_state & I_FREEING)
goto out;
/*
@@ -943,22 +965,31 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- struct backing_dev_info *bdi = wb->bdi;
-
- if (bdi_cap_writeback_dirty(bdi) &&
- !test_bit(BDI_registered, &bdi->state)) {
- WARN_ON(1);
- printk(KERN_ERR "bdi-%s not registered\n",
- bdi->name);
+ bdi = inode_to_bdi(inode);
+
+ if (bdi_cap_writeback_dirty(bdi)) {
+ WARN(!test_bit(BDI_registered, &bdi->state),
+ "bdi-%s not registered\n", bdi->name);
+
+ /*
+ * If this is the first dirty inode for this
+ * bdi, we have to wake up the corresponding
+ * bdi thread to make sure background
+ * write-back happens later.
+ */
+ if (!wb_has_dirty_io(&bdi->wb))
+ wakeup_bdi = true;
}
inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &wb->b_dirty);
+ list_move(&inode->i_list, &bdi->wb.b_dirty);
}
}
out:
spin_unlock(&inode_lock);
+
+ if (wakeup_bdi)
+ bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
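Note the shape of the wakeup: whether one is needed is decided under inode_lock (first dirty inode for this bdi), but the wakeup itself, which takes other locks and may arm a timer, is issued only after the spinlock is dropped. The pattern in isolation, with a hypothetical stand-in for bdi_wakeup_thread_delayed():

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool has_dirty_io;       /* models wb_has_dirty_io(&bdi->wb) */

    /* hypothetical stand-in: arms a timer that wakes the flusher later */
    static void wakeup_flusher_delayed(void) { }

    void mark_dirty_model(void)
    {
            bool wakeup = false;

            pthread_mutex_lock(&state_lock);        /* inode_lock */
            if (!has_dirty_io) {    /* first dirty inode for this bdi */
                    has_dirty_io = true;
                    wakeup = true;
            }
            pthread_mutex_unlock(&state_lock);

            if (wakeup)             /* wake outside the spinlock */
                    wakeup_flusher_delayed();
    }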
@@ -1001,7 +1032,7 @@ static void wait_sb_inodes(struct super_block *sb)
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
struct address_space *mapping;
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
mapping = inode->i_mapping;
if (mapping->nrpages == 0)