Commit af40e35a authored by SeongJae Park, committed by Andrew Morton

mm/damon/paddr: rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz'

DAMON's physical address space monitoring operations set now uses folios.
Rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz' to reflect that.

Link: https://lkml.kernel.org/r/20230109213335.62525-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7477d756
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -80,7 +80,8 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 }
 
 struct damon_pa_access_chk_result {
-	unsigned long page_sz;
+	/* size of the folio for the access checked physical memory address */
+	unsigned long folio_sz;
 	bool accessed;
 };
 
@@ -91,7 +92,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 
 	result->accessed = false;
-	result->page_sz = PAGE_SIZE;
+	result->folio_sz = PAGE_SIZE;
 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;
 		if (pvmw.pte) {
@@ -103,7 +104,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 			result->accessed = pmd_young(*pvmw.pmd) ||
 				!folio_test_idle(folio) ||
 				mmu_notifier_test_young(vma->vm_mm, addr);
-			result->page_sz = HPAGE_PMD_SIZE;
+			result->folio_sz = HPAGE_PMD_SIZE;
 #else
 			WARN_ON_ONCE(1);
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -118,11 +119,11 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 	return !result->accessed;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct damon_pa_access_chk_result result = {
-		.page_sz = PAGE_SIZE,
+		.folio_sz = PAGE_SIZE,
 		.accessed = false,
 	};
 	struct rmap_walk_control rwc = {
@@ -157,25 +158,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	folio_put(folio);
 out:
-	*page_sz = result.page_sz;
+	*folio_sz = result.folio_sz;
 	return result.accessed;
 }
 
 static void __damon_pa_check_access(struct damon_region *r)
 {
 	static unsigned long last_addr;
-	static unsigned long last_page_sz = PAGE_SIZE;
+	static unsigned long last_folio_sz = PAGE_SIZE;
 	static bool last_accessed;
 
 	/* If the region is in the last checked page, reuse the result */
-	if (ALIGN_DOWN(last_addr, last_page_sz) ==
-				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
+	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
 		if (last_accessed)
 			r->nr_accesses++;
 		return;
 	}
 
-	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
+	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
 	if (last_accessed)
 		r->nr_accesses++;
...
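
For context on why the renamed 'last_folio_sz' matters, below is a minimal, self-contained userspace sketch (not kernel code) of the result-reuse shortcut in __damon_pa_check_access(): if the current sampling address falls into the same folio as the previously checked one, the cached access-check result is reused instead of doing another rmap walk. The local ALIGN_DOWN macro, the fake_pa_young() helper, and the main() driver are illustrative stand-ins, not part of the patched file.

/*
 * Sketch of the "same folio as last time" shortcut.  ALIGN_DOWN and
 * PAGE_SIZE are local stand-ins for the kernel definitions; fake_pa_young()
 * stands in for damon_pa_young() and simply pretends even-numbered 4 KiB
 * folios were accessed.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALIGN_DOWN(addr, sz)	((addr) & ~((sz) - 1))
#define PAGE_SIZE		4096UL

static unsigned long last_addr;
static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;

/* Hypothetical stand-in for damon_pa_young(). */
static bool fake_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	*folio_sz = PAGE_SIZE;
	return (paddr / PAGE_SIZE) % 2 == 0;
}

/* Returns true when the cached result was reused. */
static bool check_access(unsigned long sampling_addr, unsigned int *nr_accesses)
{
	/* Same folio as the last check: reuse the cached result. */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(sampling_addr, last_folio_sz)) {
		if (last_accessed)
			(*nr_accesses)++;
		return true;
	}

	/* Different folio: do a fresh check and remember it. */
	last_accessed = fake_pa_young(sampling_addr, &last_folio_sz);
	if (last_accessed)
		(*nr_accesses)++;
	last_addr = sampling_addr;	/* mirrors the cache update in the real function */
	return false;
}

int main(void)
{
	unsigned int nr_accesses = 0;

	check_access(0x2000, &nr_accesses);	/* fresh check, accessed */
	check_access(0x2100, &nr_accesses);	/* same 4 KiB folio: result reused */
	check_access(0x5000, &nr_accesses);	/* different folio: fresh check */
	printf("nr_accesses = %u\n", nr_accesses);	/* prints 2 */
	return 0;
}

The rename in this patch does not change that logic; it only makes the names say "folio" where the value is in fact the size of the checked folio, not necessarily PAGE_SIZE.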