Commit 64daa5d8 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

vmscan: convert lazy freeing to folios

Remove a hidden call to compound_head(), and account nr_pages instead of a
single page.  This matches the code in lru_lazyfree_fn() that accounts
nr_pages to PGLAZYFREE.
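
For context, this is the pattern the commit message refers to: the page-based event helper resolves the memcg through the head page on every call, while the folio variant starts from the head page and can account a whole folio at once. A minimal sketch, abridged from include/linux/memcontrol.h of this era (the "hidden compound_head()" note is a simplification of what page_memcg() does internally via page_folio()):

	/* Before: the page-based helper; page_memcg() resolves the head
	 * page, i.e. an implicit compound_head() on every call. */
	static inline void count_memcg_page_event(struct page *page,
						  enum vm_event_item idx)
	{
		struct mem_cgroup *memcg = page_memcg(page);	/* hidden compound_head() */

		if (memcg)
			count_memcg_events(memcg, idx, 1);	/* always one page */
	}

	/* After: the caller already has the folio (the head page), so no
	 * lookup is needed, and all nr pages are accounted in one call. */
	static inline void count_memcg_folio_events(struct folio *folio,
			enum vm_event_item idx, unsigned long nr)
	{
		struct mem_cgroup *memcg = folio_memcg(folio);

		if (memcg)
			count_memcg_events(memcg, idx, nr);
	}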

Link: https://lkml.kernel.org/r/20220504182857.4013401-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0a36111c
include/linux/memcontrol.h

@@ -1057,6 +1057,15 @@ static inline void count_memcg_page_event(struct page *page,
 		count_memcg_events(memcg, idx, 1);
 }
 
+static inline void count_memcg_folio_events(struct folio *folio,
+		enum vm_event_item idx, unsigned long nr)
+{
+	struct mem_cgroup *memcg = folio_memcg(folio);
+
+	if (memcg)
+		count_memcg_events(memcg, idx, nr);
+}
+
 static inline void count_memcg_event_mm(struct mm_struct *mm,
 					enum vm_event_item idx)
 {
@@ -1494,6 +1503,11 @@ static inline void count_memcg_page_event(struct page *page,
 {
 }
 
+static inline void count_memcg_folio_events(struct folio *folio,
+		enum vm_event_item idx, unsigned long nr)
+{
+}
+
 static inline
 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 {
mm/vmscan.c

@@ -1902,20 +1902,20 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			}
 		}
 
-		if (PageAnon(page) && !PageSwapBacked(page)) {
+		if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
 			/* follow __remove_mapping for reference */
-			if (!page_ref_freeze(page, 1))
+			if (!folio_ref_freeze(folio, 1))
 				goto keep_locked;
 			/*
-			 * The page has only one reference left, which is
-			 * from the isolation. After the caller puts the
-			 * page back on lru and drops the reference, the
-			 * page will be freed anyway. It doesn't matter
-			 * which lru it goes. So we don't bother checking
-			 * PageDirty here.
+			 * The folio has only one reference left, which is
+			 * from the isolation. After the caller puts the
+			 * folio back on the lru and drops the reference, the
+			 * folio will be freed anyway. It doesn't matter
+			 * which lru it goes on. So we don't bother checking
+			 * the dirty flag here.
 			 */
-			count_vm_event(PGLAZYFREED);
-			count_memcg_page_event(page, PGLAZYFREED);
+			count_vm_events(PGLAZYFREED, nr_pages);
+			count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
 		} else if (!mapping || !__remove_mapping(mapping, folio, true,
 							 sc->target_mem_cgroup))
 			goto keep_locked;
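
Note on the accounting change: nr_pages in shrink_page_list() is the folio's base-page count, so lazy-freeing one PMD-sized THP now adds its full page count (e.g. 512 on x86-64) to both the PGLAZYFREED VM counter and the memcg PGLAZYFREED event, where the old code added 1 regardless of folio size. This is what brings the freeing side in line with lru_lazyfree_fn().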