Commit f01ef569 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback: (27 commits)
  mm: properly reflect task dirty limits in dirty_exceeded logic
  writeback: don't busy retry writeback on new/freeing inodes
  writeback: scale IO chunk size up to half device bandwidth
  writeback: trace global_dirty_state
  writeback: introduce max-pause and pass-good dirty limits
  writeback: introduce smoothed global dirty limit
  writeback: consolidate variable names in balance_dirty_pages()
  writeback: show bdi write bandwidth in debugfs
  writeback: bdi write bandwidth estimation
  writeback: account per-bdi accumulated written pages
  writeback: make writeback_control.nr_to_write straight
  writeback: skip tmpfs early in balance_dirty_pages_ratelimited_nr()
  writeback: trace event writeback_queue_io
  writeback: trace event writeback_single_inode
  writeback: remove .nonblocking and .encountered_congestion
  writeback: remove writeback_control.more_io
  writeback: skip balance_dirty_pages() for in-memory fs
  writeback: add bdi_dirty_limit() kernel-doc
  writeback: avoid extra sync work at enqueue time
  writeback: elevate queue_io() into wb_writeback()
  ...

Fix up trivial conflicts in fs/fs-writeback.c and mm/filemap.c
parents a93a1329 bcff25fc
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -44,24 +44,28 @@ inline struct block_device *I_BDEV(struct inode *inode)
 {
 	return &BDEV_I(inode)->bdev;
 }
 EXPORT_SYMBOL(I_BDEV);
 
 /*
- * move the inode from it's current bdi to the a new bdi. if the inode is dirty
- * we need to move it onto the dirty list of @dst so that the inode is always
- * on the right list.
+ * Move the inode from its current bdi to a new bdi. If the inode is dirty we
+ * need to move it onto the dirty list of @dst so that the inode is always on
+ * the right list.
  */
 static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
-	spin_lock(&inode_wb_list_lock);
+	struct backing_dev_info *old = inode->i_data.backing_dev_info;
+
+	if (unlikely(dst == old))		/* deadlock avoidance */
+		return;
+	bdi_lock_two(&old->wb, &dst->wb);
 	spin_lock(&inode->i_lock);
 	inode->i_data.backing_dev_info = dst;
 	if (inode->i_state & I_DIRTY)
 		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&old->wb.list_lock);
+	spin_unlock(&dst->wb.list_lock);
 }
 
 static sector_t max_block(struct block_device *bdev)
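The dst == old early return is what the "deadlock avoidance" comment refers to: bdi_lock_two() (added later in this series, in mm/backing-dev.c) would otherwise take the same spinlock twice. bdi_lock_two() avoids ABBA deadlocks between concurrent callers by always acquiring the lower-addressed lock first. A minimal userspace sketch of that ordering discipline, using pthread mutexes rather than kernel spinlocks:

    #include <pthread.h>

    /*
     * Userspace analogue of bdi_lock_two(): when two locks of the same
     * class must be held together, take the lower-addressed one first so
     * two concurrent callers can never interleave into an ABBA deadlock.
     * As with bdi_lock_two(), the caller must never pass the same lock
     * twice (compare the dst == old check above).
     */
    static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a < b) {
                    pthread_mutex_lock(a);
                    pthread_mutex_lock(b);
            } else {
                    pthread_mutex_lock(b);
                    pthread_mutex_lock(a);
            }
    }

Because the acquisition order is canonical, unlocking can then happen in any order, which is why bdev_inode_switch_bdi() above simply releases old->wb.list_lock and then dst->wb.list_lock.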
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2551,7 +2551,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode	= wbc->sync_mode,
-		.older_than_this = NULL,
 		.nr_to_write	= 64,
 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
 		.range_end	= (loff_t)-1,
@@ -2584,7 +2583,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode	= mode,
-		.older_than_this = NULL,
 		.nr_to_write	= nr_pages * 2,
 		.range_start	= start,
 		.range_end	= end + 1,
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2741,7 +2741,7 @@ static int write_cache_pages_da(struct address_space *mapping,
 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
 
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
@@ -2973,7 +2973,7 @@ static int ext4_da_writepages(struct address_space *mapping,
 	}
 
 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 	while (!ret && wbc->nr_to_write > 0) {
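Both hunks extend the existing tag-and-write livelock avoidance to the new tagged_writepages flag: tag_pages_for_writeback() first converts the DIRTY tag of every page in the range to TOWRITE in one pass, and the writer then visits only TOWRITE pages, so pages redirtied during the sweep are deferred to the next sweep instead of keeping this one looping forever. A rough userspace sketch of the idea, with plain arrays standing in for the radix-tree tags (all names here are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    enum { NPAGES = 8 };
    static bool dirty[NPAGES];      /* stands in for PAGECACHE_TAG_DIRTY   */
    static bool towrite[NPAGES];    /* stands in for PAGECACHE_TAG_TOWRITE */

    /* Analogue of tag_pages_for_writeback(): snapshot the dirty set. */
    static void tag_for_writeback(void)
    {
            for (size_t i = 0; i < NPAGES; i++)
                    if (dirty[i])
                            towrite[i] = true;
    }

    /*
     * The writer only visits pages tagged TOWRITE, so a page that gets
     * redirtied while the sweep runs is picked up by the *next* sweep
     * rather than livelocking this one.
     */
    static void write_tagged(void)
    {
            for (size_t i = 0; i < NPAGES; i++)
                    if (towrite[i]) {
                            towrite[i] = false;
                            dirty[i] = false;       /* "write" the page */
                    }
    }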
[fs/fs-writeback.c: diff collapsed in this view]
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -37,7 +37,7 @@
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
- * inode_wb_list_lock protects:
+ * bdi->wb.list_lock protects:
  *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
  * inode_hash_lock protects:
  *   inode_hashtable, inode->i_hash
@@ -48,7 +48,7 @@
  *   inode->i_lock
  *     inode->i_sb->s_inode_lru_lock
  *
- * inode_wb_list_lock
+ * bdi->wb.list_lock
  *   inode->i_lock
  *
  * inode_hash_lock
@@ -65,7 +65,6 @@ static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
 /*
  * Empty aops. Can be used for the cases where the user does not
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1566,8 +1566,7 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	int status;
 	bool sync = true;
 
-	if (wbc->sync_mode == WB_SYNC_NONE || wbc->nonblocking ||
-	    wbc->for_background)
+	if (wbc->sync_mode == WB_SYNC_NONE)
 		sync = false;
 
 	status = pnfs_layoutcommit_inode(inode, sync);
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int);
 enum bdi_stat_item {
 	BDI_RECLAIMABLE,
 	BDI_WRITEBACK,
+	BDI_WRITTEN,
 	NR_BDI_STAT_ITEMS
 };
 
@@ -57,6 +58,7 @@ struct bdi_writeback {
 	struct list_head b_dirty;	/* dirty inodes */
 	struct list_head b_io;		/* parked for writeback */
 	struct list_head b_more_io;	/* parked for more writeback */
+	spinlock_t list_lock;		/* protects the b_* lists */
 };
 
 struct backing_dev_info {
@@ -71,6 +73,11 @@ struct backing_dev_info {
 
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
 
+	unsigned long bw_time_stamp;	/* last time write bw is updated */
+	unsigned long written_stamp;	/* pages written at bw_time_stamp */
+	unsigned long write_bandwidth;	/* the estimated write bandwidth */
+	unsigned long avg_write_bandwidth; /* further smoothed write bw */
+
 	struct prop_local_percpu completions;
 	int dirty_exceeded;
 
@@ -106,6 +113,7 @@ int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_arm_supers_timer(void);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
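The four new backing_dev_info fields carry the per-bdi write bandwidth estimation introduced by this series: the BDI_WRITTEN counter accumulates completed pages, written_stamp/bw_time_stamp snapshot that counter and the clock at the last update, and write_bandwidth/avg_write_bandwidth hold the raw and smoothed rates. A hypothetical sketch of such an estimator follows; the update interval and the 7/8-to-1/8 smoothing weight are illustrative assumptions, not the kernel's actual constants:

    /* Standalone sketch; HZ value assumed for illustration. */
    #define HZ 100UL

    struct bw_est {
            unsigned long time_stamp;     /* like bdi->bw_time_stamp */
            unsigned long written_stamp;  /* like bdi->written_stamp */
            unsigned long write_bw;       /* raw estimate, pages/s */
            unsigned long avg_write_bw;   /* smoothed estimate */
    };

    static void update_bw(struct bw_est *e, unsigned long now_jiffies,
                          unsigned long written_pages)
    {
            unsigned long elapsed = now_jiffies - e->time_stamp;
            unsigned long bw;

            if (elapsed < HZ / 5)   /* rate-limit updates to ~200ms */
                    return;
            /* pages written since last snapshot, converted to pages/s */
            bw = (written_pages - e->written_stamp) * HZ / elapsed;
            e->write_bw = bw;
            /* cheap exponential smoothing: 7/8 old + 1/8 new */
            e->avg_write_bw = (e->avg_write_bw * 7 + bw) / 8;
            e->time_stamp = now_jiffies;
            e->written_stamp = written_pages;
    }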
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,9 +7,39 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 
-struct backing_dev_info;
+/*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
+ * The 1/16 region above the global dirty limit will be put to maximum pauses:
+ *
+ *	(limit, limit + limit/DIRTY_MAXPAUSE_AREA)
+ *
+ * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put
+ * to loops:
+ *
+ *	(limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
+ *
+ * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
+ * time) for the dirty pages to drop, unless written enough pages.
+ *
+ * The global dirty threshold is normally equal to the global dirty limit,
+ * except when the system suddenly allocates a lot of anonymous memory and
+ * knocks down the global dirty threshold quickly, in which case the global
+ * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
+ */
+#define DIRTY_SCOPE		8
+#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
+
+#define DIRTY_MAXPAUSE_AREA		16
+#define DIRTY_PASSGOOD_AREA		8
+
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
 
-extern spinlock_t inode_wb_list_lock;
+struct backing_dev_info;
 
 /*
  * fs/fs-writeback.c
@@ -26,11 +56,6 @@ enum writeback_sync_modes {
  */
 struct writeback_control {
 	enum writeback_sync_modes sync_mode;
-	unsigned long *older_than_this;	/* If !NULL, only write back inodes
-					   older than this */
-	unsigned long wb_start;		/* Time writeback_inodes_wb was
-					   called. This is needed to avoid
-					   extra jobs and livelock */
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
@@ -43,13 +68,11 @@ struct writeback_control {
 	loff_t range_start;
 	loff_t range_end;
 
-	unsigned nonblocking:1;		/* Don't get stuck on request queues */
-	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_background:1;	/* A background writeback */
+	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
-	unsigned more_io:1;		/* more io to be dispatched */
 };
 
 /*
@@ -62,8 +85,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
 int writeback_inodes_sb_if_idle(struct super_block *);
 int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
 void sync_inodes_sb(struct super_block *);
-void writeback_inodes_wb(struct bdi_writeback *wb,
-		struct writeback_control *wbc);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages);
 
@@ -94,6 +116,8 @@ static inline void laptop_sync_completion(void) { }
 #endif
 void throttle_vm_writeout(gfp_t gfp_mask);
 
+extern unsigned long global_dirty_limit;
+
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
 extern unsigned long dirty_background_bytes;
@@ -128,6 +152,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
 			      unsigned long dirty);
 
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+			    unsigned long thresh,
+			    unsigned long dirty,
+			    unsigned long bdi_thresh,
+			    unsigned long bdi_dirty,
+			    unsigned long start_time);
+
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied);
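Worked numbers may help with the new thresholds. With DIRTY_FULL_SCOPE = 4, the smooth-throttling region spans the top quarter below thresh; the max-pause and pass-good regions occupy the 1/16 and 1/8 bands above limit. MIN_WRITEBACK_PAGES expresses 4096 KB in pages: with 4 KB pages (PAGE_CACHE_SHIFT = 12) it is 4096 >> 2 = 1024 pages = 4 MB. A small standalone check, with illustrative values for thresh and limit:

    #include <stdio.h>

    #define DIRTY_SCOPE             8
    #define DIRTY_FULL_SCOPE        (DIRTY_SCOPE / 2)       /* 4 */
    #define DIRTY_MAXPAUSE_AREA     16
    #define DIRTY_PASSGOOD_AREA     8

    int main(void)
    {
            /* Illustrative numbers only: thresh == limit == 1600 pages. */
            unsigned long thresh = 1600, limit = 1600;

            printf("throttle:  (%lu, %lu)\n",
                   thresh - thresh / DIRTY_FULL_SCOPE, thresh);  /* (1200, 1600) */
            printf("max-pause: (%lu, %lu)\n",
                   limit, limit + limit / DIRTY_MAXPAUSE_AREA);  /* (1600, 1700) */
            printf("pass-good: (%lu, %lu)\n",
                   limit + limit / DIRTY_MAXPAUSE_AREA,
                   limit + limit / DIRTY_PASSGOOD_AREA);         /* (1700, 1800) */
            return 0;
    }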
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -284,7 +284,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__field(	long,	pages_skipped	)
 		__field(	loff_t,	range_start	)
 		__field(	loff_t,	range_end	)
-		__field(	char,	nonblocking	)
 		__field(	char,	for_kupdate	)
 		__field(	char,	for_reclaim	)
 		__field(	char,	range_cyclic	)
@@ -299,7 +298,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__entry->pages_skipped	= wbc->pages_skipped;
 		__entry->range_start	= wbc->range_start;
 		__entry->range_end	= wbc->range_end;
-		__entry->nonblocking	= wbc->nonblocking;
 		__entry->for_kupdate	= wbc->for_kupdate;
 		__entry->for_reclaim	= wbc->for_reclaim;
 		__entry->range_cyclic	= wbc->range_cyclic;
@@ -310,13 +308,13 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 	TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
 		  "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-		  "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
+		  "range_end = %llu, for_kupdate = %d, "
 		  "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, __entry->index,
 		  __entry->nr_to_write, __entry->pages_skipped,
 		  __entry->range_start, __entry->range_end,
-		  __entry->nonblocking, __entry->for_kupdate,
+		  __entry->for_kupdate,
 		  __entry->for_reclaim, __entry->range_cyclic,
 		  (unsigned long)__entry->writeback_index)
 );
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -380,7 +380,6 @@ TRACE_EVENT(ext4_da_writepages_result,
 		__field(	int,	pages_written		)
 		__field(	long,	pages_skipped		)
 		__field(	int,	sync_mode		)
-		__field(	char,	more_io			)
 		__field(      pgoff_t,	writeback_index		)
 	),
 
@@ -391,16 +390,15 @@ TRACE_EVENT(ext4_da_writepages_result,
 		__entry->pages_written	= pages_written;
 		__entry->pages_skipped	= wbc->pages_skipped;
 		__entry->sync_mode	= wbc->sync_mode;
-		__entry->more_io	= wbc->more_io;
 		__entry->writeback_index = inode->i_mapping->writeback_index;
 	),
 
 	TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
-		  " more_io %d sync_mode %d writeback_index %lu",
+		  "sync_mode %d writeback_index %lu",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino, __entry->ret,
 		  __entry->pages_written, __entry->pages_skipped,
-		  __entry->more_io, __entry->sync_mode,
+		  __entry->sync_mode,
 		  (unsigned long) __entry->writeback_index)
 );
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -8,6 +8,19 @@
 #include <linux/device.h>
 #include <linux/writeback.h>
 
+#define show_inode_state(state)					\
+	__print_flags(state, "|",				\
+		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
+		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
+		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
+		{I_NEW,			"I_NEW"},		\
+		{I_WILL_FREE,		"I_WILL_FREE"},		\
+		{I_FREEING,		"I_FREEING"},		\
+		{I_CLEAR,		"I_CLEAR"},		\
+		{I_SYNC,		"I_SYNC"},		\
+		{I_REFERENCED,		"I_REFERENCED"}		\
+	)
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -49,6 +62,9 @@ DEFINE_EVENT(writeback_work_class, name, \
 DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
 
 TRACE_EVENT(writeback_pages_written,
 	TP_PROTO(long pages_written),
@@ -88,6 +104,30 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 DEFINE_WRITEBACK_EVENT(writeback_thread_start);
 DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
+DEFINE_WRITEBACK_EVENT(balance_dirty_start);
+DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
+
+TRACE_EVENT(balance_dirty_written,
+
+	TP_PROTO(struct backing_dev_info *bdi, int written),
+
+	TP_ARGS(bdi, written),
+
+	TP_STRUCT__entry(
+		__array(char,	name, 32)
+		__field(int,	written)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->written = written;
+	),
+
+	TP_printk("bdi %s written %d",
+		  __entry->name,
+		  __entry->written
+	)
+);
 
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -101,8 +141,6 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__field(int, for_background)
 		__field(int, for_reclaim)
 		__field(int, range_cyclic)
-		__field(int, more_io)
-		__field(unsigned long, older_than_this)
 		__field(long, range_start)
 		__field(long, range_end)
 	),
@@ -116,15 +154,12 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__entry->for_background	= wbc->for_background;
 		__entry->for_reclaim	= wbc->for_reclaim;
 		__entry->range_cyclic	= wbc->range_cyclic;
-		__entry->more_io	= wbc->more_io;
-		__entry->older_than_this = wbc->older_than_this ?
-						*wbc->older_than_this : 0;
 		__entry->range_start	= (long)wbc->range_start;
 		__entry->range_end	= (long)wbc->range_end;
 	),
 
 	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
-		"bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
+		"bgrd=%d reclm=%d cyclic=%d "
 		"start=0x%lx end=0x%lx",
 		__entry->name,
 		__entry->nr_to_write,
@@ -134,8 +169,6 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__entry->for_background,
 		__entry->for_reclaim,
 		__entry->range_cyclic,
-		__entry->more_io,
-		__entry->older_than_this,
 		__entry->range_start,
 		__entry->range_end)
 )
@@ -144,14 +177,79 @@ DECLARE_EVENT_CLASS(wbc_class,
 DEFINE_EVENT(wbc_class, name, \
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
 	TP_ARGS(wbc, bdi))
-DEFINE_WBC_EVENT(wbc_writeback_start);
-DEFINE_WBC_EVENT(wbc_writeback_written);
-DEFINE_WBC_EVENT(wbc_writeback_wait);
-DEFINE_WBC_EVENT(wbc_balance_dirty_start);
-DEFINE_WBC_EVENT(wbc_balance_dirty_written);
-DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
 DEFINE_WBC_EVENT(wbc_writepage);
 
+TRACE_EVENT(writeback_queue_io,
+	TP_PROTO(struct bdi_writeback *wb,
+		 unsigned long *older_than_this,
+		 int moved),
+	TP_ARGS(wb, older_than_this, moved),
+	TP_STRUCT__entry(
+		__array(char,		name, 32)
+		__field(unsigned long,	older)
+		__field(long,		age)
+		__field(int,		moved)
+	),
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+		__entry->older	= older_than_this ? *older_than_this : 0;
+		__entry->age	= older_than_this ?
+				  (jiffies - *older_than_this) * 1000 / HZ : -1;
+		__entry->moved	= moved;
+	),
+	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+		  __entry->name,
+		  __entry->older,	/* older_than_this in jiffies */
+		  __entry->age,		/* older_than_this in relative milliseconds */
+		  __entry->moved)
+);
+
+TRACE_EVENT(global_dirty_state,
+
+	TP_PROTO(unsigned long background_thresh,
+		 unsigned long dirty_thresh
+	),
+
+	TP_ARGS(background_thresh,
+		dirty_thresh
+	),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	nr_dirty)
+		__field(unsigned long,	nr_writeback)
+		__field(unsigned long,	nr_unstable)
+		__field(unsigned long,	background_thresh)
+		__field(unsigned long,	dirty_thresh)
+		__field(unsigned long,	dirty_limit)
+		__field(unsigned long,	nr_dirtied)
+		__field(unsigned long,	nr_written)
+	),
+
+	TP_fast_assign(
+		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
+		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
+		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
+		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
+		__entry->nr_written	= global_page_state(NR_WRITTEN);
+		__entry->background_thresh = background_thresh;
+		__entry->dirty_thresh	= dirty_thresh;
+		__entry->dirty_limit	= global_dirty_limit;
+	),
+
+	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
+		  "bg_thresh=%lu thresh=%lu limit=%lu "
+		  "dirtied=%lu written=%lu",
+		  __entry->nr_dirty,
+		  __entry->nr_writeback,
+		  __entry->nr_unstable,
+		  __entry->background_thresh,
+		  __entry->dirty_thresh,
+		  __entry->dirty_limit,
+		  __entry->nr_dirtied,
+		  __entry->nr_written
+	)
+);
 
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -187,6 +285,63 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
 	TP_ARGS(usec_timeout, usec_delayed)
 );
 
+DECLARE_EVENT_CLASS(writeback_single_inode_template,
+
+	TP_PROTO(struct inode *inode,
+		 struct writeback_control *wbc,
+		 unsigned long nr_to_write
+	),
+
+	TP_ARGS(inode, wbc, nr_to_write),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(unsigned long, ino)
+		__field(unsigned long, state)
+		__field(unsigned long, age)
+		__field(unsigned long, writeback_index)
+		__field(long, nr_to_write)
+		__field(unsigned long, wrote)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name,
+			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+		__entry->ino		= inode->i_ino;
+		__entry->state		= inode->i_state;
+		__entry->age		= (jiffies - inode->dirtied_when) *
+							1000 / HZ;
+		__entry->writeback_index = inode->i_mapping->writeback_index;
+		__entry->nr_to_write	= nr_to_write;
+		__entry->wrote		= nr_to_write - wbc->nr_to_write;
+	),
+
+	TP_printk("bdi %s: ino=%lu state=%s age=%lu "
+		  "index=%lu to_write=%ld wrote=%lu",
+		  __entry->name,
+		  __entry->ino,
+		  show_inode_state(__entry->state),
+		  __entry->age,
+		  __entry->writeback_index,
+		  __entry->nr_to_write,
+		  __entry->wrote
+	)
+);
+
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
+	TP_PROTO(struct inode *inode,
+		 struct writeback_control *wbc,
+		 unsigned long nr_to_write),
+	TP_ARGS(inode, wbc, nr_to_write)
+);
+
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
+	TP_PROTO(struct inode *inode,
+		 struct writeback_control *wbc,
+		 unsigned long nr_to_write),
+	TP_ARGS(inode, wbc, nr_to_write)
+);
+
 #endif /* _TRACE_WRITEBACK_H */
 
 /* This part must be outside protection */
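Both new event classes report ages by scaling a jiffies delta to milliseconds, as in (jiffies - inode->dirtied_when) * 1000 / HZ above. A standalone sketch of that conversion, with the tick rate assumed for illustration:

    /* Jiffies-to-milliseconds age, as computed by the new trace events. */
    #define HZ 100UL        /* assumed tick rate for this sketch */

    static long age_ms(unsigned long now, unsigned long then)
    {
            /* unsigned subtraction keeps the delta sane across wraparound */
            return (long)((now - then) * 1000 / HZ);
    }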
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -45,6 +45,17 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
+void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+{
+	if (wb1 < wb2) {
+		spin_lock(&wb1->list_lock);
+		spin_lock_nested(&wb2->list_lock, 1);
+	} else {
+		spin_lock(&wb2->list_lock);
+		spin_lock_nested(&wb1->list_lock, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -67,34 +78,42 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 	struct inode *inode;
 
 	nr_dirty = nr_io = nr_more_io = 0;
-	spin_lock(&inode_wb_list_lock);
+	spin_lock(&wb->list_lock);
 	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
 		nr_dirty++;
 	list_for_each_entry(inode, &wb->b_io, i_wb_list)
 		nr_io++;
 	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
 		nr_more_io++;
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&wb->list_lock);
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	seq_printf(m,
-		   "BdiWriteback:     %8lu kB\n"
-		   "BdiReclaimable:   %8lu kB\n"
-		   "BdiDirtyThresh:   %8lu kB\n"
-		   "DirtyThresh:      %8lu kB\n"
-		   "BackgroundThresh: %8lu kB\n"
-		   "b_dirty:          %8lu\n"
-		   "b_io:             %8lu\n"
-		   "b_more_io:        %8lu\n"
-		   "bdi_list:         %8u\n"
-		   "state:            %8lx\n",
+		   "BdiWriteback:       %10lu kB\n"
+		   "BdiReclaimable:     %10lu kB\n"
+		   "BdiDirtyThresh:     %10lu kB\n"
+		   "DirtyThresh:        %10lu kB\n"
+		   "BackgroundThresh:   %10lu kB\n"
+		   "BdiWritten:         %10lu kB\n"
+		   "BdiWriteBandwidth:  %10lu kBps\n"
+		   "b_dirty:            %10lu\n"
+		   "b_io:               %10lu\n"
+		   "b_more_io:          %10lu\n"
+		   "bdi_list:           %10u\n"
+		   "state:              %10lx\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
-		   K(bdi_thresh), K(dirty_thresh),
-		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
+		   K(bdi_thresh),
+		   K(dirty_thresh),
+		   K(background_thresh),
+		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
+		   (unsigned long) K(bdi->write_bandwidth),
+		   nr_dirty,
+		   nr_io,
+		   nr_more_io,
 		   !list_empty(&bdi->bdi_list), bdi->state);
 #undef K
@@ -249,18 +268,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
 	return wb_has_dirty_io(&bdi->wb);
 }
 
-static void bdi_flush_io(struct backing_dev_info *bdi)
-{
-	struct writeback_control wbc = {
-		.sync_mode		= WB_SYNC_NONE,
-		.older_than_this	= NULL,
-		.range_cyclic		= 1,
-		.nr_to_write		= 1024,
-	};
-
-	writeback_inodes_wb(&bdi->wb, &wbc);
-}
-
 /*
  * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
@@ -446,9 +453,10 @@ static int bdi_forker_thread(void *ptr)
 			if (IS_ERR(task)) {
 				/*
 				 * If thread creation fails, force writeout of
-				 * the bdi from the thread.
+				 * the bdi from the thread. Hopefully 1024 is
+				 * large enough for efficient IO.
 				 */
-				bdi_flush_io(bdi);
+				writeback_inodes_wb(&bdi->wb, 1024);
 			} else {
 				/*
 				 * The spinlock makes sure we do not lose
@@ -629,9 +637,15 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	INIT_LIST_HEAD(&wb->b_dirty);
 	INIT_LIST_HEAD(&wb->b_io);
 	INIT_LIST_HEAD(&wb->b_more_io);
+	spin_lock_init(&wb->list_lock);
 	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
 }
 
+/*
+ * Initial write bandwidth: 100 MB/s
+ */
+#define INIT_BW		(100 << (20 - PAGE_SHIFT))
+
 int bdi_init(struct backing_dev_info *bdi)
 {
 	int i, err;
@@ -654,6 +668,13 @@ int bdi_init(struct backing_dev_info *bdi)
 	}
 
 	bdi->dirty_exceeded = 0;
+
+	bdi->bw_time_stamp = jiffies;
+	bdi->written_stamp = 0;
+
+	bdi->write_bandwidth = INIT_BW;
+	bdi->avg_write_bandwidth = INIT_BW;
+
 	err = prop_local_init_percpu(&bdi->completions);
 
 	if (err) {
@@ -677,11 +698,12 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	if (bdi_has_dirty_io(bdi)) {
 		struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-		spin_lock(&inode_wb_list_lock);
+		bdi_lock_two(&bdi->wb, dst);
 		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
 		list_splice(&bdi->wb.b_io, &dst->b_io);
 		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-		spin_unlock(&inode_wb_list_lock);
+		spin_unlock(&bdi->wb.list_lock);
+		spin_unlock(&dst->list_lock);
 	}
 
 	bdi_unregister(bdi);
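Two small arithmetic points in this file: the debugfs K(x) macro converts page counts to kilobytes (pages << (PAGE_SHIFT - 10)), and INIT_BW = 100 << (20 - PAGE_SHIFT) is 100 MB/s expressed in pages per second, so the bandwidth estimator starts from a sane value before any IO has completed. A quick standalone check, assuming 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                           /* assumed 4 KiB pages */
    #define K(x)       ((x) << (PAGE_SHIFT - 10))   /* pages -> KiB */
    #define INIT_BW    (100 << (20 - PAGE_SHIFT))   /* 100 MB/s in pages/s */

    int main(void)
    {
            printf("%d pages = %d KiB\n", 256, K(256));     /* 1024 KiB */
            printf("INIT_BW = %d pages/s (= %d KiB/s)\n",
                   INIT_BW, K(INIT_BW));                    /* 25600 pages/s */
            return 0;
    }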
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -78,7 +78,7 @@
  *  ->i_mutex			(generic_file_buffered_write)
  *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
  *
- *  inode_wb_list_lock
+ *  bdi->wb.list_lock
  *    sb_lock			(fs/fs-writeback.c)
  *    ->mapping->tree_lock	(__sync_single_inode)
  *
@@ -96,9 +96,9 @@
  *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
  *    ->private_lock		(page_remove_rmap->set_page_dirty)
  *    ->tree_lock		(page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock	(page_remove_rmap->set_page_dirty)
+ *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock	(zap_pte_range->set_page_dirty)
+ *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
  *
[mm/page-writeback.c: diff collapsed in this view]
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -31,11 +31,11 @@
  *             mmlist_lock (in mmput, drain_mmlist and others)
  *             mapping->private_lock (in __set_page_dirty_buffers)
  *             inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *             inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
+ *             bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  *               sb_lock (within inode_lock in fs/fs-writeback.c)
  *               mapping->tree_lock (widely used, in set_page_dirty,
  *                         in arch-dependent flush_dcache_mmap_lock,
- *                         within inode_wb_list_lock in __sync_single_inode)
+ *                         within bdi.wb->list_lock in __sync_single_inode)
  *
  * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon)
  *   ->tasklist_lock