mm/vmscan: Turn page_check_dirty_writeback() into folio_check_dirty_writeback()

Saves a few calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent cbcc268b
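
The saving is mechanical: the legacy page-flag tests must allow for the page being a tail page of a compound page, so each one resolves the head page via compound_head() before touching the flags, whereas a struct folio is never a tail page and the folio variants can skip that step. A minimal sketch of the difference (hypothetical helpers for illustration, not the kernel's actual macro-generated code from TESTPAGEFLAG() and friends):

static inline bool sketch_page_dirty(struct page *page)
{
	/* every legacy page-flag test pays for a head-page lookup */
	return test_bit(PG_dirty, &compound_head(page)->flags);
}

static inline bool sketch_folio_dirty(struct folio *folio)
{
	/* a folio is always a head page, so read the flags word directly */
	return test_bit(PG_dirty, &folio->flags);
}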
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1437,7 +1437,7 @@ static enum page_references page_check_references(struct page *page,
 }
 
 /* Check if a page is dirty or under writeback */
-static void page_check_dirty_writeback(struct page *page,
+static void folio_check_dirty_writeback(struct folio *folio,
 			       bool *dirty, bool *writeback)
 {
 	struct address_space *mapping;
@@ -1446,24 +1446,24 @@ static void page_check_dirty_writeback(struct page *page,
 	 * Anonymous pages are not handled by flushers and must be written
 	 * from reclaim context. Do not stall reclaim based on them
 	 */
-	if (!page_is_file_lru(page) ||
-	    (PageAnon(page) && !PageSwapBacked(page))) {
+	if (!folio_is_file_lru(folio) ||
+	    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
 		*dirty = false;
 		*writeback = false;
 		return;
 	}
 
-	/* By default assume that the page flags are accurate */
-	*dirty = PageDirty(page);
-	*writeback = PageWriteback(page);
+	/* By default assume that the folio flags are accurate */
+	*dirty = folio_test_dirty(folio);
+	*writeback = folio_test_writeback(folio);
 
 	/* Verify dirty/writeback state if the filesystem supports it */
-	if (!page_has_private(page))
+	if (!folio_test_private(folio))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = folio_mapping(folio);
 	if (mapping && mapping->a_ops->is_dirty_writeback)
-		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
+		mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback);
 }
 
 static struct page *alloc_demote_page(struct page *page, unsigned long node)
@@ -1572,7 +1572,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 	 * reclaim_congested. kswapd will stall and start writing
 	 * pages if the tail of the LRU is all dirty unqueued pages.
 	 */
-	page_check_dirty_writeback(page, &dirty, &writeback);
+	folio_check_dirty_writeback(folio, &dirty, &writeback);
 	if (dirty || writeback)
 		stat->nr_dirty++;
 
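
Note the one place a struct page survives: the is_dirty_writeback address_space operation had not yet been converted at this point in the series, so, assuming the callback signature of this era,

	void (*is_dirty_writeback)(struct page *, bool *, bool *);

the call site bridges back with &folio->page. The first page of a folio is its head page, so this costs no further compound_head() call.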