Commit 04edf02c authored by Matthew Wilcox

fs: Convert writeback to XArray

A couple of short loops.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent ec82e1c1
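
The diff below replaces tagged radix-tree walks with marked XArray walks: xas_for_each_marked() visits only the entries carrying a given mark, and elsewhere in this series the PAGECACHE_TAG_* tags are mapped onto XArray marks. As a rough, self-contained illustration of that API (not part of this commit: the array name, stored values and the use of XA_MARK_0 are made up for the sketch, and it takes the RCU read lock rather than the i_pages lock the writeback code holds):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(sketch);	/* hypothetical array, not from the commit */

static unsigned long count_marked_entries(void)
{
	XA_STATE(xas, &sketch, 0);	/* cursor starting at index 0 */
	void *entry;
	unsigned long n = 0;

	/* Store a couple of value entries and mark one of them. */
	xa_store(&sketch, 1, xa_mk_value(1), GFP_KERNEL);
	xa_store(&sketch, 5, xa_mk_value(5), GFP_KERNEL);
	xa_set_mark(&sketch, 5, XA_MARK_0);

	/* Walk only the entries carrying XA_MARK_0, as the loops below do
	 * for PAGECACHE_TAG_DIRTY and PAGECACHE_TAG_WRITEBACK. */
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		n++;
	rcu_read_unlock();

	return n;	/* 1: only index 5 is marked */
}
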
@@ -339,9 +339,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
-	struct radix_tree_iter iter;
+	XA_STATE(xas, &mapping->i_pages, 0);
+	struct page *page;
 	bool switched = false;
-	void **slot;
 
 	/*
 	 * By the time control reaches here, RCU grace period has passed
@@ -375,26 +375,19 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 	 * pages actually under writeback.
 	 */
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
-				   PAGECACHE_TAG_DIRTY) {
-		struct page *page = radix_tree_deref_slot_protected(slot,
-						&mapping->i_pages.xa_lock);
-		if (likely(page) && PageDirty(page)) {
+	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+		if (PageDirty(page)) {
 			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 		}
 	}
 
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
-				   PAGECACHE_TAG_WRITEBACK) {
-		struct page *page = radix_tree_deref_slot_protected(slot,
-						&mapping->i_pages.xa_lock);
-		if (likely(page)) {
-			WARN_ON_ONCE(!PageWriteback(page));
-			dec_wb_stat(old_wb, WB_WRITEBACK);
-			inc_wb_stat(new_wb, WB_WRITEBACK);
-		}
+	xas_set(&xas, 0);
+	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+		WARN_ON_ONCE(!PageWriteback(page));
+		dec_wb_stat(old_wb, WB_WRITEBACK);
+		inc_wb_stat(new_wb, WB_WRITEBACK);
 	}
 
 	wb_get(new_wb);
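
One detail the new loops depend on: both walks reuse the single XA_STATE declared earlier in the function, so the cursor has to be rewound with xas_set(&xas, 0) before the second pass. A rough sketch of that shape (not from the commit: the xarray, the counters and the XA_MARK_0/XA_MARK_1 marks are hypothetical stand-ins for i_pages and the page-cache tags, and the real code takes the lock elsewhere in the function):

#include <linux/kernel.h>
#include <linux/xarray.h>

/* Sketch only: two marked walks sharing one XA_STATE cursor. */
static void two_pass_walk(struct xarray *xa, unsigned long *first,
			  unsigned long *second)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_lock(xa);
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		(*first)++;

	xas_set(&xas, 0);	/* rewind the cursor before the second walk */
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1)
		(*second)++;
	xa_unlock(xa);
}
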