mm/page_idle: Convert page_idle_clear_pte_refs() to use a folio

The PG_idle and PG_young bits are ignored if they're set on tail
pages, so ensure we're passing a folio around.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Parent commit: 2aff7a47
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
#include <linux/page_ext.h> #include <linux/page_ext.h>
#include <linux/page_idle.h> #include <linux/page_idle.h>
#include "internal.h"
#define BITMAP_CHUNK_SIZE sizeof(u64) #define BITMAP_CHUNK_SIZE sizeof(u64)
#define BITMAP_CHUNK_BITS (BITMAP_CHUNK_SIZE * BITS_PER_BYTE) #define BITMAP_CHUNK_BITS (BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
...@@ -48,7 +50,8 @@ static bool page_idle_clear_pte_refs_one(struct page *page, ...@@ -48,7 +50,8 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned long addr, void *arg) unsigned long addr, void *arg)
{ {
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0); struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
bool referenced = false; bool referenced = false;
while (page_vma_mapped_walk(&pvmw)) { while (page_vma_mapped_walk(&pvmw)) {
...@@ -70,19 +73,20 @@ static bool page_idle_clear_pte_refs_one(struct page *page, ...@@ -70,19 +73,20 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
} }
if (referenced) { if (referenced) {
clear_page_idle(page); folio_clear_idle(folio);
/* /*
* We cleared the referenced bit in a mapping to this page. To * We cleared the referenced bit in a mapping to this page. To
* avoid interference with page reclaim, mark it young so that * avoid interference with page reclaim, mark it young so that
* page_referenced() will return > 0. * page_referenced() will return > 0.
*/ */
set_page_young(page); folio_set_young(folio);
} }
return true; return true;
} }
static void page_idle_clear_pte_refs(struct page *page) static void page_idle_clear_pte_refs(struct page *page)
{ {
struct folio *folio = page_folio(page);
/* /*
* Since rwc.arg is unused, rwc is effectively immutable, so we * Since rwc.arg is unused, rwc is effectively immutable, so we
* can make it static const to save some cycles and stack. * can make it static const to save some cycles and stack.
...@@ -93,18 +97,17 @@ static void page_idle_clear_pte_refs(struct page *page) ...@@ -93,18 +97,17 @@ static void page_idle_clear_pte_refs(struct page *page)
}; };
bool need_lock; bool need_lock;
if (!page_mapped(page) || if (!folio_mapped(folio) || !folio_raw_mapping(folio))
!page_rmapping(page))
return; return;
need_lock = !PageAnon(page) || PageKsm(page); need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
if (need_lock && !trylock_page(page)) if (need_lock && !folio_trylock(folio))
return; return;
rmap_walk(page, (struct rmap_walk_control *)&rwc); rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
if (need_lock) if (need_lock)
unlock_page(page); folio_unlock(folio);
} }
static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj, static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
......