Commit aac8d41c authored by Jens Axboe

writeback: only allow one inflight and pending full flush

When someone calls wakeup_flusher_threads() or
wakeup_flusher_threads_bdi(), they schedule writeback of all dirty
pages in the system (or on that bdi). If we are tight on memory, we
can get tons of these queued from kswapd/vmscan. This causes (at
least) two problems:

1) We consume a ton of memory just allocating writeback work items.
   We've seen as many as 600 million of these writeback work items
   pending. That's a lot of memory to pointlessly hold hostage while
   the box is under memory pressure.

2) We spend so much time processing these work items that we
   introduce a softlockup in writeback processing. This is because
   each of the writeback work items ends up doing no actual work (it's
   hard when you have millions of identical ones coming into the
   flush machinery), so we just sit in a tight loop pulling work
   items and deleting/freeing them.

Fix this by adding a 'start_all' bit to the writeback structure and
setting it when someone attempts to flush all dirty pages. The bit is
cleared when we start writeback on that work item. If the bit is
already set when we attempt to queue !nr_pages writeback, we simply
ignore the request.

This allows one full flush in flight, with one more pending, and
makes for more efficient handling of this type of writeback.
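
The gating just described can be illustrated with a minimal, self-contained
userspace sketch (C11, not kernel code). The names queue_full_flush(),
process_next(), start_all_pending and queued_items below are hypothetical
stand-ins for wb_start_writeback(), get_next_work_item(), the WB_start_all
bit and the wb work list; only the flag handling mirrors the patch.

/*
 * Hedged userspace sketch of the "one inflight + one pending" gate.
 * Not kernel code: the flag plays the role of WB_start_all and the
 * counter stands in for wb->work_list.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool start_all_pending;	/* analogous to WB_start_all */
static int queued_items;		/* stand-in for the work list */

/* Producer side, analogous to wb_start_writeback() in the patch. */
static void queue_full_flush(void)
{
	/*
	 * Separate load and store, like the test_bit()/set_bit() pair
	 * in the patch: an occasional race just queues one harmless
	 * extra item, so no atomic read-modify-write is needed.
	 */
	if (atomic_load(&start_all_pending))
		return;			/* one already pending or inflight */
	atomic_store(&start_all_pending, true);
	queued_items++;			/* "allocate and queue" the work */
}

/* Consumer side, analogous to get_next_work_item() in the patch. */
static bool process_next(void)
{
	if (queued_items == 0)
		return false;
	queued_items--;
	/* Work has started; allow one more full flush to be queued. */
	atomic_store(&start_all_pending, false);
	return true;
}

int main(void)
{
	for (int i = 0; i < 1000; i++)	/* a storm of flush requests */
		queue_full_flush();
	printf("queued after storm: %d\n", queued_items);	/* prints 1 */

	process_next();			/* the single item goes inflight */
	queue_full_flush();		/* one more may now be queued */
	printf("queued while inflight: %d\n", queued_items);	/* prints 1 */
	return 0;
}

Running the sketch shows a thousand back-to-back requests collapsing into
at most one inflight item plus one pending, which is the bound described above.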
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e8e8a0c6
@@ -53,6 +53,7 @@ struct wb_writeback_work {
 	unsigned int for_background:1;
 	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
 	unsigned int auto_free:1;	/* free on completion */
+	unsigned int start_all:1;	/* nr_pages == 0 (all) writeback */
 	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
@@ -951,6 +952,20 @@ static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 	if (!wb_has_dirty_io(wb))
 		return;
 
+	/*
+	 * All callers of this function want to start writeback of all
+	 * dirty pages. Places like vmscan can call this at a very
+	 * high frequency, causing pointless allocations of tons of
+	 * work items and keeping the flusher threads busy retrieving
+	 * that work. Ensure that we only allow one of them pending and
+	 * inflight at the time. It doesn't matter if we race a little
+	 * bit on this, so use the faster separate test/set bit variants.
+	 */
+	if (test_bit(WB_start_all, &wb->state))
+		return;
+
+	set_bit(WB_start_all, &wb->state);
+
 	/*
 	 * This is WB_SYNC_NONE writeback, so if allocation fails just
 	 * wakeup the thread for old dirty data writeback
@@ -958,6 +973,7 @@ static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 	work = kzalloc(sizeof(*work),
 		       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
 	if (!work) {
+		clear_bit(WB_start_all, &wb->state);
 		trace_writeback_nowork(wb);
 		wb_wakeup(wb);
 		return;
@@ -968,6 +984,7 @@ static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
 	work->range_cyclic = 1;
 	work->reason	= reason;
 	work->auto_free	= 1;
+	work->start_all = 1;
 
 	wb_queue_work(wb, work);
 }
@@ -1821,6 +1838,14 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 		list_del_init(&work->list);
 	}
 	spin_unlock_bh(&wb->work_lock);
+
+	/*
+	 * Once we start processing a work item that had !nr_pages,
+	 * clear the wb state bit for that so we can allow more.
+	 */
+	if (work && work->start_all)
+		clear_bit(WB_start_all, &wb->state);
+
 	return work;
 }
@@ -24,6 +24,7 @@ enum wb_state {
 	WB_shutting_down,	/* wb_shutdown() in progress */
 	WB_writeback_running,	/* Writeback is in progress */
 	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
+	WB_start_all,		/* nr_pages == 0 (all) work pending */
 };
 
 enum wb_congested_state {