Commit c28a0e96 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

vmscan: remove remaining uses of page in shrink_page_list

These are all straightforward conversions to the folio API.

Link: https://lkml.kernel.org/r/20220504182857.4013401-16-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent dc786690
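
The conversions in the diff below follow a mechanical pattern. As a reader's aid (not part of the patch), here is a minimal sketch of the page-to-folio equivalences being applied; the folio calls are real kernel APIs, but example_folio_checks() itself is hypothetical:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Illustrative only, not part of the patch: each folio call
	 * replaces the page-based call named in the trailing comment. */
	static long example_folio_checks(struct folio *folio)
	{
		long nr_pages = 0;

		if (!folio_trylock(folio))		/* was trylock_page(page) */
			return 0;
		if (folio_test_swapcache(folio) ||	/* was PageSwapCache(page) */
		    folio_test_large(folio))		/* was PageTransHuge(page) */
			nr_pages = folio_nr_pages(folio); /* was compound_nr(page) */
		folio_unlock(folio);			/* was unlock_page(page) */
		return nr_pages;
	}

Note that a large folio covers what the page API called a THP, so folio_test_large() stands in for PageTransHuge() throughout.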
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1507,11 +1507,11 @@ static unsigned int demote_page_list(struct list_head *demote_pages,
 	return nr_succeeded;
 }
 
-static bool may_enter_fs(struct page *page, gfp_t gfp_mask)
+static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
 {
 	if (gfp_mask & __GFP_FS)
 		return true;
-	if (!PageSwapCache(page) || !(gfp_mask & __GFP_IO))
+	if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
 		return false;
 	/*
 	 * We can "enter_fs" for swap-cache with only __GFP_IO
@@ -1520,7 +1520,7 @@ static bool may_enter_fs(struct page *page, gfp_t gfp_mask)
 	 * but that will never affect SWP_FS_OPS, so the data_race
 	 * is safe.
 	 */
-	return !data_race(page_swap_flags(page) & SWP_FS_OPS);
+	return !data_race(page_swap_flags(&folio->page) & SWP_FS_OPS);
 }
 
 /*
@@ -1547,7 +1547,6 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 retry:
 	while (!list_empty(page_list)) {
 		struct address_space *mapping;
-		struct page *page;
 		struct folio *folio;
 		enum page_references references = PAGEREF_RECLAIM;
 		bool dirty, writeback;
@@ -1557,19 +1556,18 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 
 		folio = lru_to_folio(page_list);
 		list_del(&folio->lru);
-		page = &folio->page;
 
-		if (!trylock_page(page))
+		if (!folio_trylock(folio))
 			goto keep;
 
-		VM_BUG_ON_PAGE(PageActive(page), page);
+		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
 
-		nr_pages = compound_nr(page);
+		nr_pages = folio_nr_pages(folio);
 
-		/* Account the number of base pages even though THP */
+		/* Account the number of base pages */
 		sc->nr_scanned += nr_pages;
 
-		if (unlikely(!page_evictable(page)))
+		if (unlikely(!folio_evictable(folio)))
 			goto activate_locked;
 
 		if (!sc->may_unmap && folio_mapped(folio))
@@ -1578,7 +1576,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 		/*
 		 * The number of dirty pages determines if a node is marked
 		 * reclaim_congested. kswapd will stall and start writing
-		 * pages if the tail of the LRU is all dirty unqueued pages.
+		 * folios if the tail of the LRU is all dirty unqueued folios.
 		 */
 		folio_check_dirty_writeback(folio, &dirty, &writeback);
 		if (dirty || writeback)
@@ -1588,21 +1586,21 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			stat->nr_unqueued_dirty += nr_pages;
 
 		/*
-		 * Treat this page as congested if
-		 * pages are cycling through the LRU so quickly that the
-		 * pages marked for immediate reclaim are making it to the
-		 * end of the LRU a second time.
+		 * Treat this folio as congested if folios are cycling
+		 * through the LRU so quickly that the folios marked
+		 * for immediate reclaim are making it to the end of
+		 * the LRU a second time.
 		 */
-		if (writeback && PageReclaim(page))
+		if (writeback && folio_test_reclaim(folio))
 			stat->nr_congested += nr_pages;
 
 		/*
 		 * If a folio at the tail of the LRU is under writeback, there
 		 * are three cases to consider.
 		 *
-		 * 1) If reclaim is encountering an excessive number of folios
-		 *    under writeback and this folio is both under
-		 *    writeback and has the reclaim flag set then it
+		 * 1) If reclaim is encountering an excessive number
+		 *    of folios under writeback and this folio has both
+		 *    the writeback and reclaim flags set, then it
 		 *    indicates that folios are being queued for I/O but
 		 *    are being recycled through the LRU before the I/O
 		 *    can complete. Waiting on the folio itself risks an
@@ -1651,19 +1649,19 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			/* Case 2 above */
 			} else if (writeback_throttling_sane(sc) ||
 			    !folio_test_reclaim(folio) ||
-			    !may_enter_fs(page, sc->gfp_mask)) {
+			    !may_enter_fs(folio, sc->gfp_mask)) {
 				/*
 				 * This is slightly racy -
-				 * folio_end_writeback() might have just
-				 * cleared the reclaim flag, then setting
-				 * reclaim here ends up interpreted as
-				 * the readahead flag - but that does
-				 * not matter enough to care. What we
-				 * do want is for this folio to have
-				 * the reclaim flag set next time memcg
-				 * reclaim reaches the tests above, so
-				 * it will then folio_wait_writeback()
-				 * to avoid OOM; and it's also appropriate
+				 * folio_end_writeback() might have
+				 * just cleared the reclaim flag, then
+				 * setting the reclaim flag here ends up
+				 * interpreted as the readahead flag - but
+				 * that does not matter enough to care.
+				 * What we do want is for this folio to
+				 * have the reclaim flag set next time
+				 * memcg reclaim reaches the tests above,
+				 * so it will then wait for writeback to
+				 * avoid OOM; and it's also appropriate
 				 * in global reclaim.
 				 */
 				folio_set_reclaim(folio);
@@ -1691,37 +1689,37 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
-			; /* try to reclaim the page below */
+			; /* try to reclaim the folio below */
 		}
 
 		/*
-		 * Before reclaiming the page, try to relocate
+		 * Before reclaiming the folio, try to relocate
		 * its contents to another node.
 		 */
 		if (do_demote_pass &&
-		    (thp_migration_supported() || !PageTransHuge(page))) {
-			list_add(&page->lru, &demote_pages);
-			unlock_page(page);
+		    (thp_migration_supported() || !folio_test_large(folio))) {
+			list_add(&folio->lru, &demote_pages);
+			folio_unlock(folio);
 			continue;
 		}
 
 		/*
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
-		 * Lazyfree page could be freed directly
+		 * Lazyfree folio could be freed directly
 		 */
-		if (PageAnon(page) && PageSwapBacked(page)) {
-			if (!PageSwapCache(page)) {
+		if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
+			if (!folio_test_swapcache(folio)) {
 				if (!(sc->gfp_mask & __GFP_IO))
 					goto keep_locked;
 				if (folio_maybe_dma_pinned(folio))
 					goto keep_locked;
-				if (PageTransHuge(page)) {
-					/* cannot split THP, skip it */
+				if (folio_test_large(folio)) {
+					/* cannot split folio, skip it */
 					if (!can_split_folio(folio, NULL))
 						goto activate_locked;
 					/*
-					 * Split pages without a PMD map right
+					 * Split folios without a PMD map right
 					 * away. Chances are some or all of the
 					 * tail pages can be freed without IO.
 					 */
@@ -1744,20 +1742,19 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 						goto activate_locked_split;
 				}
 			}
-		} else if (PageSwapBacked(page) && PageTransHuge(page)) {
-			/* Split shmem THP */
+		} else if (folio_test_swapbacked(folio) &&
+			   folio_test_large(folio)) {
+			/* Split shmem folio */
 			if (split_folio_to_list(folio, page_list))
 				goto keep_locked;
 		}
 
 		/*
-		 * THP may get split above, need minus tail pages and update
-		 * nr_pages to avoid accounting tail pages twice.
-		 *
-		 * The tail pages that are added into swap cache successfully
-		 * reach here.
+		 * If the folio was split above, the tail pages will make
+		 * their own pass through this function and be accounted
+		 * then.
 		 */
-		if ((nr_pages > 1) && !PageTransHuge(page)) {
+		if ((nr_pages > 1) && !folio_test_large(folio)) {
 			sc->nr_scanned -= (nr_pages - 1);
 			nr_pages = 1;
 		}
@@ -1815,7 +1812,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			if (references == PAGEREF_RECLAIM_CLEAN)
 				goto keep_locked;
-			if (!may_enter_fs(page, sc->gfp_mask))
+			if (!may_enter_fs(folio, sc->gfp_mask))
 				goto keep_locked;
 			if (!sc->may_writepage)
 				goto keep_locked;
 
@@ -1917,11 +1914,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 						 sc->target_mem_cgroup))
 			goto keep_locked;
 
-		unlock_page(page);
+		folio_unlock(folio);
 
 free_it:
 		/*
-		 * THP may get swapped out in a whole, need account
-		 * all base pages.
+		 * Folio may get swapped out as a whole, need to account
+		 * all pages in it.
 		 */
 		nr_reclaimed += nr_pages;
@@ -1929,10 +1926,10 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 		 * Is there need to periodically free_page_list? It would
 		 * appear not as the counts should be low
 		 */
-		if (unlikely(PageTransHuge(page)))
-			destroy_compound_page(page);
+		if (unlikely(folio_test_large(folio)))
+			destroy_compound_page(&folio->page);
 		else
-			list_add(&page->lru, &free_pages);
+			list_add(&folio->lru, &free_pages);
 		continue;
 
 activate_locked_split:
@@ -1958,18 +1955,19 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
 		}
 keep_locked:
-		unlock_page(page);
+		folio_unlock(folio);
 keep:
-		list_add(&page->lru, &ret_pages);
-		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
+		list_add(&folio->lru, &ret_pages);
+		VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
+				folio_test_unevictable(folio), folio);
 	}
 	/* 'page_list' is always empty here */
 
-	/* Migrate pages selected for demotion */
+	/* Migrate folios selected for demotion */
 	nr_reclaimed += demote_page_list(&demote_pages, pgdat);
-	/* Pages that could not be demoted are still in @demote_pages */
+	/* Folios that could not be demoted are still in @demote_pages */
 	if (!list_empty(&demote_pages)) {
-		/* Pages which failed to demoted go back on @page_list for retry: */
+		/* Folios which weren't demoted go back on @page_list for retry: */
 		list_splice_init(&demote_pages, page_list);
 		do_demote_pass = false;
 		goto retry;
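
Two helpers touched above, page_swap_flags() and destroy_compound_page(), had not yet been converted and still take a struct page, so the patch passes &folio->page at those call sites. A minimal sketch of that interim bridging pattern; legacy_page_op() is hypothetical:

	#include <linux/mm.h>

	/* Hypothetical stand-in for a not-yet-converted page-based API. */
	static void legacy_page_op(struct page *page)
	{
		/* operates on the head page */
	}

	static void bridge_to_legacy(struct folio *folio)
	{
		/* &folio->page is the folio's head page: the standard
		 * escape hatch while page-based APIs remain. */
		legacy_page_op(&folio->page);
	}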