-rw-r--r--   fs/btrfs/extent-tree.c            |    3
-rw-r--r--   fs/buffer.c                       |    2
-rw-r--r--   fs/ext4/inode.c                   |    2
-rw-r--r--   fs/fs-writeback.c                 |   84
-rw-r--r--   fs/quota/quota.c                  |    2
-rw-r--r--   fs/sync.c                         |    4
-rw-r--r--   fs/ubifs/budget.c                 |    2
-rw-r--r--   include/linux/backing-dev.h       |   14
-rw-r--r--   include/linux/sched.h             |    7
-rw-r--r--   include/linux/writeback.h         |   33
-rw-r--r--   include/trace/events/writeback.h  |  161
-rw-r--r--   kernel/fork.c                     |    3
-rw-r--r--   mm/backing-dev.c                  |    7
-rw-r--r--   mm/page-writeback.c               |  704
-rw-r--r--   mm/vmscan.c                       |    3
15 files changed, 806 insertions(+), 225 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f5be06a2462..c9ee0e18bbd 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3340,7 +3340,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	smp_mb();
 	nr_pages = min_t(unsigned long, nr_pages,
 			 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
-	writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
+	writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
+				       WB_REASON_FS_FREE_SPACE);
 
 	spin_lock(&space_info->lock);
 	if (reserved > space_info->bytes_reserved)
diff --git a/fs/buffer.c b/fs/buffer.c
index 70a19745cb6..19d8eb7fdc8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -288,7 +288,7 @@ static void free_more_memory(void)
 	struct zone *zone;
 	int nid;
 
-	wakeup_flusher_threads(1024);
+	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
 	yield();
 
 	for_each_online_node(nid) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cc5a6da030a..240f6e2dc7e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2372,7 +2372,7 @@ static int ext4_nonda_switch(struct super_block *sb)
 	 * start pushing delalloc when 1/2 of free blocks are dirty.
 	 */
 	if (free_blocks < 2 * dirty_blocks)
-		writeback_inodes_sb_if_idle(sb);
+		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
 
 	return 0;
 }
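The three hunks above show the calling convention this patch applies across the tree: every writeback entry point grows an explicit enum wb_reason argument so trace output can attribute each writeback pass to its trigger. A minimal sketch of a caller after the change (my_fs_low_space() is a hypothetical name; the API and the reason constant are the ones added by this patch):

	/* Hypothetical low-free-space hook, modeled on the ext4 hunk above. */
	static void my_fs_low_space(struct super_block *sb)
	{
		/* Kick writeback only if none is already in flight for this bdi. */
		if (!writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE))
			pr_debug("flusher already busy, not requeueing\n");
	}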
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 04cf3b91e50..73c3992b2bb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,11 +41,23 @@ struct wb_writeback_work {
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
+	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
 	struct completion *done;	/* set if the caller waits */
 };
 
+const char *wb_reason_name[] = {
+	[WB_REASON_BACKGROUND]		= "background",
+	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
+	[WB_REASON_SYNC]		= "sync",
+	[WB_REASON_PERIODIC]		= "periodic",
+	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
+	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
+	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
+	[WB_REASON_FORKER_THREAD]	= "forker_thread"
+};
+
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
@@ -115,7 +127,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		      bool range_cyclic)
+		      bool range_cyclic, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -135,6 +147,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
+	work->reason	= reason;
 
 	bdi_queue_work(bdi, work);
 }
@@ -150,9 +163,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			 enum wb_reason reason)
 {
-	__bdi_start_writeback(bdi, nr_pages, true);
+	__bdi_start_writeback(bdi, nr_pages, true, reason);
 }
 
 /**
@@ -251,7 +265,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  */
 static int move_expired_inodes(struct list_head *delaying_queue,
 			       struct list_head *dispatch_queue,
-			       unsigned long *older_than_this)
+			       struct wb_writeback_work *work)
 {
 	LIST_HEAD(tmp);
 	struct list_head *pos, *node;
@@ -262,8 +276,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (older_than_this &&
-		    inode_dirtied_after(inode, *older_than_this))
+		if (work->older_than_this &&
+		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
@@ -302,13 +316,13 @@ out:
  *                    |
  *                    +--> dequeue for IO
  */
-static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 {
 	int moved;
 	assert_spin_locked(&wb->list_lock);
 	list_splice_init(&wb->b_more_io, &wb->b_io);
-	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
-	trace_writeback_queue_io(wb, older_than_this, moved);
+	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
+	trace_writeback_queue_io(wb, work, moved);
 }
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -641,31 +655,40 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 	return wrote;
 }
 
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+				enum wb_reason reason)
 {
 	struct wb_writeback_work work = {
 		.nr_pages	= nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 		.range_cyclic	= 1,
+		.reason		= reason,
 	};
 
 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
-		queue_io(wb, NULL);
+		queue_io(wb, &work);
 	__writeback_inodes_wb(wb, &work);
 	spin_unlock(&wb->list_lock);
 
 	return nr_pages - work.nr_pages;
 }
 
-static inline bool over_bground_thresh(void)
+static bool over_bground_thresh(struct backing_dev_info *bdi)
 {
 	unsigned long background_thresh, dirty_thresh;
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 
-	return (global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
+	if (global_page_state(NR_FILE_DIRTY) +
+	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+		return true;
+
+	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+				bdi_dirty_limit(bdi, background_thresh))
+		return true;
+
+	return false;
 }
@@ -675,7 +698,7 @@ static inline bool over_bground_thresh(void)
 static void wb_update_bandwidth(struct bdi_writeback *wb,
 				unsigned long start_time)
 {
-	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
 }
@@ -727,7 +750,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh())
+		if (work->for_background && !over_bground_thresh(wb->bdi))
 			break;
 
 		if (work->for_kupdate) {
@@ -738,7 +761,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
-			queue_io(wb, work->older_than_this);
+			queue_io(wb, work);
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -811,13 +834,14 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh()) {
+	if (over_bground_thresh(wb->bdi)) {
 
 		struct wb_writeback_work work = {
 			.nr_pages	= LONG_MAX,
 			.sync_mode	= WB_SYNC_NONE,
 			.for_background	= 1,
 			.range_cyclic	= 1,
+			.reason		= WB_REASON_BACKGROUND,
 		};
 
 		return wb_writeback(wb, &work);
@@ -851,6 +875,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 		.sync_mode	= WB_SYNC_NONE,
 		.for_kupdate	= 1,
 		.range_cyclic	= 1,
+		.reason		= WB_REASON_PERIODIC,
 	};
 
 	return wb_writeback(wb, &work);
@@ -969,7 +994,7 @@ int bdi_writeback_thread(void *data)
  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
  * the whole world.
  */
-void wakeup_flusher_threads(long nr_pages)
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
 
@@ -982,7 +1007,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false);
+		__bdi_start_writeback(bdi, nr_pages, false, reason);
 	}
 	rcu_read_unlock();
 }
@@ -1203,7 +1228,9 @@ static void wait_sb_inodes(struct super_block *sb)
  * on how many (if any) will be written, and this function does not wait
  * for IO completion of submitted IO.
  */
-void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
+void writeback_inodes_sb_nr(struct super_block *sb,
+			    unsigned long nr,
+			    enum wb_reason reason)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
@@ -1212,6 +1239,7 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 		.tagged_writepages	= 1,
 		.done			= &done,
 		.nr_pages		= nr,
+		.reason			= reason,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
@@ -1228,9 +1256,9 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
  * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
-void writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
+	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1241,11 +1269,11 @@ EXPORT_SYMBOL(writeback_inodes_sb);
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
  */
-int writeback_inodes_sb_if_idle(struct super_block *sb)
+int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1262,11 +1290,12 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  * Returns 1 if writeback was started, 0 if not.
  */
 int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-				   unsigned long nr)
+				   unsigned long nr,
+				   enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb_nr(sb, nr);
+		writeback_inodes_sb_nr(sb, nr, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1290,6 +1319,7 @@ void sync_inodes_sb(struct super_block *sb)
 		.nr_pages	= LONG_MAX,
 		.range_cyclic	= 0,
 		.done		= &done,
+		.reason		= WB_REASON_SYNC,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index aae0edb95c6..35f4b0ecdeb 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -286,7 +286,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 		/* caller already holds s_umount */
 		if (sb->s_flags & MS_RDONLY)
 			return -EROFS;
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, WB_REASON_SYNC);
 		return 0;
 	default:
 		return -EINVAL;
diff --git a/fs/sync.c b/fs/sync.c
index c98a7477edf..101b8ef901d 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -43,7 +43,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 	if (wait)
 		sync_inodes_sb(sb);
 	else
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, WB_REASON_SYNC);
 
 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, wait);
@@ -98,7 +98,7 @@ static void sync_filesystems(int wait)
  */
 SYSCALL_DEFINE0(sync)
 {
-	wakeup_flusher_threads(0);
+	wakeup_flusher_threads(0, WB_REASON_SYNC);
 	sync_filesystems(0);
 	sync_filesystems(1);
 	if (unlikely(laptop_mode))
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 315de66e52b..bc4f94b2870 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -63,7 +63,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
 	down_read(&c->vfs_sb->s_umount);
-	writeback_inodes_sb(c->vfs_sb);
+	writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
 	up_read(&c->vfs_sb->s_umount);
 }
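Note the locking contract visible in these hunks: writeback_inodes_sb() and writeback_inodes_sb_nr() WARN if s_umount is not held, while the *_if_idle() variants take it themselves. UBIFS therefore wraps the call in its own read lock; essentially the hunk above, restated as a standalone sketch:

	static void shrink_liability_sketch(struct ubifs_info *c)
	{
		/* satisfies the WARN_ON(!rwsem_is_locked(...)) inside the call */
		down_read(&c->vfs_sb->s_umount);
		writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
		up_read(&c->vfs_sb->s_umount);
	}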
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 3b2f9cb8298..b1038bd686a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int);
 enum bdi_stat_item {
 	BDI_RECLAIMABLE,
 	BDI_WRITEBACK,
+	BDI_DIRTIED,
 	BDI_WRITTEN,
 	NR_BDI_STAT_ITEMS
 };
@@ -74,10 +75,20 @@ struct backing_dev_info {
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
 
 	unsigned long bw_time_stamp;	/* last time write bw is updated */
+	unsigned long dirtied_stamp;
 	unsigned long written_stamp;	/* pages written at bw_time_stamp */
 	unsigned long write_bandwidth;	/* the estimated write bandwidth */
 	unsigned long avg_write_bandwidth; /* further smoothed write bw */
 
+	/*
+	 * The base dirty throttle rate, re-calculated on every 200ms.
+	 * All the bdi tasks' dirty rate will be curbed under it.
+	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+	 * in small steps and is much more smooth/stable than the latter.
+	 */
+	unsigned long dirty_ratelimit;
+	unsigned long balanced_dirty_ratelimit;
+
 	struct prop_local_percpu completions;
 	int dirty_exceeded;
 
@@ -107,7 +118,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+				enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
 int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e8acce717d2..68daf4f27e2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1522,6 +1522,13 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+	/*
+	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
+	 * balance_dirty_pages() for some dirty throttling pause
+	 */
+	int nr_dirtied;
+	int nr_dirtied_pause;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
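The two new task_struct fields rate-limit the throttling check itself: nr_dirtied counts pages dirtied since the last pause, and nr_dirtied_pause is the budget that triggers the next one. Roughly, the fast path becomes the following (a simplified sketch under the comment above, not the verbatim kernel code):

	static void balance_dirty_pages_ratelimited_sketch(struct address_space *mapping)
	{
		current->nr_dirtied++;
		if (current->nr_dirtied >= current->nr_dirtied_pause) {
			/* enter the throttling slow path, then reset the budget */
			balance_dirty_pages(mapping, current->nr_dirtied);
			current->nr_dirtied = 0;
		}
	}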
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 2b8963ff0f3..a378c295851 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -39,6 +39,23 @@ enum writeback_sync_modes {
 };
 
 /*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+	WB_REASON_BACKGROUND,
+	WB_REASON_TRY_TO_FREE_PAGES,
+	WB_REASON_SYNC,
+	WB_REASON_PERIODIC,
+	WB_REASON_LAPTOP_TIMER,
+	WB_REASON_FREE_MORE_MEM,
+	WB_REASON_FS_FREE_SPACE,
+	WB_REASON_FORKER_THREAD,
+
+	WB_REASON_MAX,
+};
+extern const char *wb_reason_name[];
+
+/*
  * A control structure which tells the writeback code what to do.  These are
  * always on the stack, and hence need no locking.  They are always initialised
  * in a manner such that unspecified fields are set to zero.
@@ -69,14 +86,17 @@ struct writeback_control {
  */
 struct bdi_writeback;
 int inode_wait(void *);
-void writeback_inodes_sb(struct super_block *);
-void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
-int writeback_inodes_sb_if_idle(struct super_block *);
-int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
+void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+							enum wb_reason reason);
+int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason);
+int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr,
+				   enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+				enum wb_reason reason);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
-void wakeup_flusher_threads(long nr_pages);
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 
 /* writeback.h requires fs.h; it, too, is not included from here.  */
 static inline void wait_on_inode(struct inode *inode)
@@ -143,6 +163,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
 
 void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
+			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
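WB_REASON_MAX exists so the wb_reason_name[] table defined in fs-writeback.c above can be kept in step with the enum and bounds-checked. A hypothetical lookup helper (not part of this patch) that stays safe if the two ever drift apart:

	static inline const char *wb_reason_str(enum wb_reason reason)
	{
		if ((unsigned int)reason >= WB_REASON_MAX)
			return "unknown";
		return wb_reason_name[reason];
	}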
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 5f172703eb4..b99caa8b780 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -34,6 +34,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(int, for_kupdate)
 		__field(int, range_cyclic)
 		__field(int, for_background)
+		__field(int, reason)
 	),
 	TP_fast_assign(
 		strncpy(__entry->name, dev_name(bdi->dev), 32);
@@ -43,16 +44,18 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__entry->for_kupdate = work->for_kupdate;
 		__entry->range_cyclic = work->range_cyclic;
 		__entry->for_background	= work->for_background;
+		__entry->reason = work->reason;
 	),
 	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
-		  "kupdate=%d range_cyclic=%d background=%d",
+		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
 		  __entry->name,
 		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
 		  __entry->nr_pages,
 		  __entry->sync_mode,
 		  __entry->for_kupdate,
 		  __entry->range_cyclic,
-		  __entry->for_background
+		  __entry->for_background,
+		  wb_reason_name[__entry->reason]
 	)
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -104,30 +107,6 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 DEFINE_WRITEBACK_EVENT(writeback_thread_start);
 DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
-DEFINE_WRITEBACK_EVENT(balance_dirty_start);
-DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
-
-TRACE_EVENT(balance_dirty_written,
-
-	TP_PROTO(struct backing_dev_info *bdi, int written),
-
-	TP_ARGS(bdi, written),
-
-	TP_STRUCT__entry(
-		__array(char,	name, 32)
-		__field(int,	written)
-	),
-
-	TP_fast_assign(
-		strncpy(__entry->name, dev_name(bdi->dev), 32);
-		__entry->written = written;
-	),
-
-	TP_printk("bdi %s written %d",
-		  __entry->name,
-		  __entry->written
-	)
-);
 
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -181,27 +160,31 @@ DEFINE_WBC_EVENT(wbc_writepage);
 TRACE_EVENT(writeback_queue_io,
 	TP_PROTO(struct bdi_writeback *wb,
-		 unsigned long *older_than_this,
+		 struct wb_writeback_work *work,
 		 int moved),
-	TP_ARGS(wb, older_than_this, moved),
+	TP_ARGS(wb, work, moved),
 	TP_STRUCT__entry(
 		__array(char,		name, 32)
 		__field(unsigned long,	older)
 		__field(long,		age)
 		__field(int,		moved)
+		__field(int,		reason)
 	),
 	TP_fast_assign(
+		unsigned long *older_than_this = work->older_than_this;
 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
 		__entry->older	= older_than_this ?  *older_than_this : 0;
 		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
 		__entry->moved	= moved;
+		__entry->reason	= work->reason;
 	),
-	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
 		  __entry->name,
 		  __entry->older,	/* older_than_this in jiffies */
 		  __entry->age,	/* older_than_this in relative milliseconds */
-		  __entry->moved)
+		  __entry->moved,
+		  wb_reason_name[__entry->reason])
 );
 
 TRACE_EVENT(global_dirty_state,
@@ -250,6 +233,124 @@ TRACE_EVENT(global_dirty_state,
 	)
 );
 
+#define KBps(x)			((x) << (PAGE_SHIFT - 10))
+
+TRACE_EVENT(bdi_dirty_ratelimit,
+
+	TP_PROTO(struct backing_dev_info *bdi,
+		 unsigned long dirty_rate,
+		 unsigned long task_ratelimit),
+
+	TP_ARGS(bdi, dirty_rate, task_ratelimit),
+
+	TP_STRUCT__entry(
+		__array(char,		bdi, 32)
+		__field(unsigned long,	write_bw)
+		__field(unsigned long,	avg_write_bw)
+		__field(unsigned long,	dirty_rate)
+		__field(unsigned long,	dirty_ratelimit)
+		__field(unsigned long,	task_ratelimit)
+		__field(unsigned long,	balanced_dirty_ratelimit)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+		__entry->write_bw	= KBps(bdi->write_bandwidth);
+		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
+		__entry->dirty_rate	= KBps(dirty_rate);
+		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+		__entry->task_ratelimit	= KBps(task_ratelimit);
+		__entry->balanced_dirty_ratelimit =
+					KBps(bdi->balanced_dirty_ratelimit);
+	),
+
+	TP_printk("bdi %s: "
+		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+		  "dirty_ratelimit=%lu task_ratelimit=%lu "
+		  "balanced_dirty_ratelimit=%lu",
+		  __entry->bdi,
+		  __entry->write_bw,		/* write bandwidth */
+		  __entry->avg_write_bw,	/* avg write bandwidth */
+		  __entry->dirty_rate,		/* bdi dirty rate */
+		  __entry->dirty_ratelimit,	/* base ratelimit */
+		  __entry->task_ratelimit, /* ratelimit with position control */
+		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+	)
+);
+
+TRACE_EVENT(balance_dirty_pages,
+
+	TP_PROTO(struct backing_dev_info *bdi,
+		 unsigned long thresh,
+		 unsigned long bg_thresh,
+		 unsigned long dirty,
+		 unsigned long bdi_thresh,
+		 unsigned long bdi_dirty,
+		 unsigned long dirty_ratelimit,
+		 unsigned long task_ratelimit,
+		 unsigned long dirtied,
+		 long pause,
+		 unsigned long start_time),
+
+	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+		dirty_ratelimit, task_ratelimit,
+		dirtied, pause, start_time),
+
+	TP_STRUCT__entry(
+		__array(	 char,	bdi, 32)
+		__field(unsigned long,	limit)
+		__field(unsigned long,	setpoint)
+		__field(unsigned long,	dirty)
+		__field(unsigned long,	bdi_setpoint)
+		__field(unsigned long,	bdi_dirty)
+		__field(unsigned long,	dirty_ratelimit)
+		__field(unsigned long,	task_ratelimit)
+		__field(unsigned int,	dirtied)
+		__field(unsigned int,	dirtied_pause)
+		__field(unsigned long,	paused)
+		__field(	 long,	pause)
+	),
+
+	TP_fast_assign(
+		unsigned long freerun = (thresh + bg_thresh) / 2;
+		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+
+		__entry->limit		= global_dirty_limit;
+		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
+		__entry->dirty		= dirty;
+		__entry->bdi_setpoint	= __entry->setpoint *
+						bdi_thresh / (thresh + 1);
+		__entry->bdi_dirty	= bdi_dirty;
+		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
+		__entry->task_ratelimit	= KBps(task_ratelimit);
+		__entry->dirtied	= dirtied;
+		__entry->dirtied_pause	= current->nr_dirtied_pause;
+		__entry->pause		= pause * 1000 / HZ;
+		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
+	),
+
+
+	TP_printk("bdi %s: "
+		  "limit=%lu setpoint=%lu dirty=%lu "
+		  "bdi_setpoint=%lu bdi_dirty=%lu "
+		  "dirty_ratelimit=%lu task_ratelimit=%lu "
+		  "dirtied=%u dirtied_pause=%u "
+		  "paused=%lu pause=%ld",
+		  __entry->bdi,
+		  __entry->limit,
+		  __entry->setpoint,
+		  __entry->dirty,
+		  __entry->bdi_setpoint,
+		  __entry->bdi_dirty,
+		  __entry->dirty_ratelimit,
+		  __entry->task_ratelimit,
+		  __entry->dirtied,
+		  __entry->dirtied_pause,
+		  __entry->paused,	/* ms */
+		  __entry->pause	/* ms */
+	)
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
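All rates in the two new tracepoints pass through KBps(), which rescales a count of pages (or a pages-per-second rate) to kilobytes. With the common 4 KiB page size, PAGE_SHIFT - 10 == 2, so KBps(x) == 4 * x. A standalone userspace check of that arithmetic (PAGE_SHIFT hardcoded here as an assumption):

	#include <assert.h>

	#define PAGE_SHIFT	12			/* assuming 4 KiB pages */
	#define KBps(x)		((x) << (PAGE_SHIFT - 10))

	int main(void)
	{
		/* 25600 pages/s is 100 MiB/s, printed as 102400 KiB/s */
		assert(KBps(25600) == 102400);
		return 0;
	}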
+ "bdi_setpoint=%lu bdi_dirty=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " + "dirtied=%u dirtied_pause=%u " + "paused=%lu pause=%ld", + __entry->bdi, + __entry->limit, + __entry->setpoint, + __entry->dirty, + __entry->bdi_setpoint, + __entry->bdi_dirty, + __entry->dirty_ratelimit, + __entry->task_ratelimit, + __entry->dirtied, + __entry->dirtied_pause, + __entry->paused, /* ms */ + __entry->pause /* ms */ + ) +); + DECLARE_EVENT_CLASS(writeback_congest_waited_template, TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), diff --git a/kernel/fork.c b/kernel/fork.c index 70d76191afb..ba0d1726132 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1299,6 +1299,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->pdeath_signal = 0; p->exit_state = 0; + p->nr_dirtied = 0; + p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); + /* * Ok, make it visible to the rest of the system. * We dont wake it up yet. diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 7520ef0bfd4..a0860640378 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -97,6 +97,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) "BdiDirtyThresh: %10lu kB\n" "DirtyThresh: %10lu kB\n" "BackgroundThresh: %10lu kB\n" + "BdiDirtied: %10lu kB\n" "BdiWritten: %10lu kB\n" "BdiWriteBandwidth: %10lu kBps\n" "b_dirty: %10lu\n" @@ -109,6 +110,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) K(bdi_thresh), K(dirty_thresh), K(background_thresh), + (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)), (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)), (unsigned long) K(bdi->write_bandwidth), nr_dirty, @@ -473,7 +475,8 @@ static int bdi_forker_thread(void *ptr) * the bdi from the thread. Hopefully 1024 is * large enough for efficient IO. */ - writeback_inodes_wb(&bdi->wb, 1024); + writeback_inodes_wb(&bdi->wb, 1024, + WB_REASON_FORKER_THREAD); } else { /* * The spinlock makes sure we do not lose @@ -683,6 +686,8 @@ int bdi_init(struct backing_dev_info *bdi) bdi->bw_time_stamp = jiffies; bdi->written_stamp = 0; + bdi->balanced_dirty_ratelimit = INIT_BW; + bdi->dirty_ratelimit = INIT_BW; bdi->write_bandwidth = INIT_BW; bdi->avg_write_bandwidth = INIT_BW; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 793e9874de5..9c31199dae9 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -46,26 +46,14 @@ */ #define BANDWIDTH_INTERVAL max(HZ/5, 1) +#define RATELIMIT_CALC_SHIFT 10 + /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. */ static long ratelimit_pages = 32; -/* - * When balance_dirty_pages decides that the caller needs to perform some - * non-background writeback, this is how many pages it will attempt to write. - * It should be somewhat larger than dirtied pages to ensure that reasonably - * large amounts of I/O are submitted. 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 793e9874de5..9c31199dae9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -46,26 +46,14 @@
  */
 #define BANDWIDTH_INTERVAL	max(HZ/5, 1)
 
+#define RATELIMIT_CALC_SHIFT	10
+
 /*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  * will look to see if it needs to force writeback or throttling.
  */
 static long ratelimit_pages = 32;
 
-/*
- * When balance_dirty_pages decides that the caller needs to perform some
- * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than dirtied pages to ensure that reasonably
- * large amounts of I/O are submitted.
- */
-static inline long sync_writeback_pages(unsigned long dirtied)
-{
-	if (dirtied < ratelimit_pages)
-		dirtied = ratelimit_pages;
-
-	return dirtied + dirtied / 2;
-}
-
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
@@ -167,6 +155,8 @@ static void update_completion_period(void)
 	int shift = calc_period_shift();
 	prop_change_shift(&vm_completions, shift);
 	prop_change_shift(&vm_dirties, shift);
+
+	writeback_set_ratelimit();
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
@@ -260,50 +250,6 @@ static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 				numerator, denominator);
 }
 
-static inline void task_dirties_fraction(struct task_struct *tsk,
-		long *numerator, long *denominator)
-{
-	prop_fraction_single(&vm_dirties, &tsk->dirties,
-				numerator, denominator);
-}
-
-/*
- * task_dirty_limit - scale down dirty throttling threshold for one task
- *
- * task specific dirty limit:
- *
- *   dirty -= (dirty/8) * p_{t}
- *
- * To protect light/slow dirtying tasks from heavier/fast ones, we start
- * throttling individual tasks before reaching the bdi dirty limit.
- * Relatively low thresholds will be allocated to heavy dirtiers. So when
- * dirty pages grow large, heavy dirtiers will be throttled first, which will
- * effectively curb the growth of dirty pages. Light dirtiers with high enough
- * dirty threshold may never get throttled.
- */
-#define TASK_LIMIT_FRACTION 8
-static unsigned long task_dirty_limit(struct task_struct *tsk,
-				       unsigned long bdi_dirty)
-{
-	long numerator, denominator;
-	unsigned long dirty = bdi_dirty;
-	u64 inv = dirty / TASK_LIMIT_FRACTION;
-
-	task_dirties_fraction(tsk, &numerator, &denominator);
-	inv *= numerator;
-	do_div(inv, denominator);
-
-	dirty -= inv;
-
-	return max(dirty, bdi_dirty/2);
-}
-
-/* Minimum limit for any task */
-static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
-{
-	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
-}
-
 /*
  * bdi_min_ratio keeps the sum of the minimum dirty shares of all
  * registered backing devices, which, for obvious reasons, can not
@@ -413,6 +359,12 @@ unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
+static unsigned long dirty_freerun_ceiling(unsigned long thresh,
+					   unsigned long bg_thresh)
+{
+	return (thresh + bg_thresh) / 2;
+}
+
 static unsigned long hard_dirty_limit(unsigned long thresh)
 {
 	return max(thresh, global_dirty_limit);
@@ -497,6 +449,198 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 	return bdi_dirty;
 }
 
+/*
+ * Dirty position control.
+ *
+ * (o) global/bdi setpoints
+ *
+ * We want the dirty pages be balanced around the global/bdi setpoints.
+ * When the number of dirty pages is higher/lower than the setpoint, the
+ * dirty position control ratio (and hence task dirty ratelimit) will be
+ * decreased/increased to bring the dirty pages back to the setpoint.
+ *
+ *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
+ *
+ *     if (dirty < setpoint) scale up   pos_ratio
+ *     if (dirty > setpoint) scale down pos_ratio
+ *
+ *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
+ *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
+ *
+ *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
+ *
+ * (o) global control line
+ *
+ *     ^ pos_ratio
+ *     |
+ *     |            |<===== global dirty control scope ======>|
+ * 2.0 .............*
+ *     |            .*
+ *     |            . *
+ *     |            .   *
+ *     |            .     *
+ *     |            .        *
+ *     |            .            *
+ * 1.0 ................................*
+ *     |            .                  .     *
+ *     |            .                  .          *
+ *     |            .                  .              *
+ *     |            .                  .                 *
+ *     |            .                  .                    *
+ *   0 +------------.------------------.----------------------*------------->
+ *           freerun^          setpoint^                 limit^   dirty pages
+ *
+ * (o) bdi control line
+ *
+ *     ^ pos_ratio
+ *     |
+ *     |    *
+ *     |      *
+ *     |        *
+ *     |          *
+ *     |            * |<=========== span ============>|
+ * 1.0 .......................*
+ *     |                      . *
+ *     |                      .   *
+ *     |                      .     *
+ *     |                      .       *
+ *     |                      .         *
+ *     |                      .           *
+ *     |                      .             *
+ *     |                      .               *
+ *     |                      .                 *
+ *     |                      .                   *
+ *     |                      .                     *
+ * 1/4 ...............................................*  *  *  *  *  *  *  *  *  *  *  *
+ *     |                      .                          .
+ *     |                      .                            .
+ *     |                      .                              .
+ *   0 +----------------------.-------------------------------.------------->
+ *     bdi_setpoint^                    x_intercept^
+ *
+ * The bdi control line won't drop below pos_ratio=1/4
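Read as code, the global control line drawn above is a third-order curve through (freerun, 2.0), (setpoint, 1.0) and (limit, 0), computed in fixed point scaled by 1 << RATELIMIT_CALC_SHIFT. A simplified sketch of that curve (not the verbatim kernel implementation, which uses div_s64() and additional clamping; the "| 1" division guard is an assumption of the sketch):

	#define RATELIMIT_CALC_SHIFT	10

	static long global_pos_ratio(unsigned long setpoint, unsigned long dirty,
				     unsigned long limit)
	{
		/* signed distance from the setpoint, in units of (limit - setpoint) */
		long x = ((long)setpoint - (long)dirty) << RATELIMIT_CALC_SHIFT;

		x /= (long)(limit - setpoint) | 1;	/* | 1 only guards div-by-0 */

		/* pos_ratio = 1.0 + x^3: 2.0 at freerun, 1.0 at setpoint, 0 at limit */
		return (1 << RATELIMIT_CALC_SHIFT) +
		       ((x * x >> RATELIMIT_CALC_SHIFT) * x >> RATELIMIT_CALC_SHIFT);
	}

At dirty == setpoint the cubic term vanishes and pos_ratio is exactly 1.0, so task_ratelimit equals the base dirty_ratelimit; above the setpoint the ratio falls toward 0 and tasks are paused progressively harder.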