Commit 2864f3d0 authored by Barry Song, committed by Andrew Morton

mm: madvise: pageout: ignore references rather than clearing young

While doing MADV_PAGEOUT, the current code will clear PTE young so that
vmscan won't read young flags to allow the reclamation of madvised folios
to go ahead.  It seems we can do it by directly ignoring references, thus
we can remove tlb flush in madvise and rmap overhead in vmscan.

Regarding the side effect, in the original code, if a parallel thread runs
side by side to access the madvised memory with the thread doing madvise,
folios will get a chance to be re-activated by vmscan (though the time gap
is actually quite small since checking PTEs is done immediately after
clearing PTEs young).  But with this patch, they will still be reclaimed. 
But this behaviour — doing PAGEOUT and accessing the memory at the same
time — is quite silly, like a DoS attempt.  So probably, we don't need to
care.  Or, ignoring the new access during the quite small time gap is even
better.

For DAMON's DAMOS_PAGEOUT based on physical address region, we still keep
its behaviour as is since a physical address might be mapped by multiple
processes.  MADV_PAGEOUT based on virtual address is actually much more
aggressive on reclamation.  To untouch paddr's DAMOS_PAGEOUT, we simply
pass ignore_references as false in reclaim_pages().

A microbench as below has shown 6% decrement on the latency of
MADV_PAGEOUT,

 #define PGSIZE 4096
 main()
 {
 	int i;
 #define SIZE 512*1024*1024
 	volatile long *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
 			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

 	for (i = 0; i < SIZE/sizeof(long); i += PGSIZE / sizeof(long))
 		p[i] =  0x11;

 	madvise(p, SIZE, MADV_PAGEOUT);
 }

w/o patch                    w/ patch
root@10:~# time ./a.out      root@10:~# time ./a.out
real	0m49.634s            real   0m46.334s
user	0m0.637s             user   0m0.648s
sys	0m47.434s            sys    0m44.265s

Link: https://lkml.kernel.org/r/20240226005739.24350-1-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 94c18d5f
...@@ -249,7 +249,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) ...@@ -249,7 +249,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
put_folio: put_folio:
folio_put(folio); folio_put(folio);
} }
applied = reclaim_pages(&folio_list); applied = reclaim_pages(&folio_list, false);
cond_resched(); cond_resched();
return applied * PAGE_SIZE; return applied * PAGE_SIZE;
} }
......
...@@ -869,7 +869,7 @@ extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long, ...@@ -869,7 +869,7 @@ extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long); unsigned long, unsigned long);
extern void set_pageblock_order(void); extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list); unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references);
unsigned int reclaim_clean_pages_from_list(struct zone *zone, unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *folio_list); struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */ /* The ALLOC_WMARK bits are used as an index to zone->watermark */
......
...@@ -386,7 +386,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, ...@@ -386,7 +386,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
return 0; return 0;
} }
if (pmd_young(orig_pmd)) { if (!pageout && pmd_young(orig_pmd)) {
pmdp_invalidate(vma, addr, pmd); pmdp_invalidate(vma, addr, pmd);
orig_pmd = pmd_mkold(orig_pmd); orig_pmd = pmd_mkold(orig_pmd);
...@@ -410,7 +410,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, ...@@ -410,7 +410,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
huge_unlock: huge_unlock:
spin_unlock(ptl); spin_unlock(ptl);
if (pageout) if (pageout)
reclaim_pages(&folio_list); reclaim_pages(&folio_list, true);
return 0; return 0;
} }
...@@ -490,7 +490,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, ...@@ -490,7 +490,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
VM_BUG_ON_FOLIO(folio_test_large(folio), folio); VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
if (pte_young(ptent)) { if (!pageout && pte_young(ptent)) {
ptent = ptep_get_and_clear_full(mm, addr, pte, ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm); tlb->fullmm);
ptent = pte_mkold(ptent); ptent = pte_mkold(ptent);
...@@ -524,7 +524,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, ...@@ -524,7 +524,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
pte_unmap_unlock(start_pte, ptl); pte_unmap_unlock(start_pte, ptl);
} }
if (pageout) if (pageout)
reclaim_pages(&folio_list); reclaim_pages(&folio_list, true);
cond_resched(); cond_resched();
return 0; return 0;
......
...@@ -2085,7 +2085,8 @@ static void shrink_active_list(unsigned long nr_to_scan, ...@@ -2085,7 +2085,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
} }
static unsigned int reclaim_folio_list(struct list_head *folio_list, static unsigned int reclaim_folio_list(struct list_head *folio_list,
struct pglist_data *pgdat) struct pglist_data *pgdat,
bool ignore_references)
{ {
struct reclaim_stat dummy_stat; struct reclaim_stat dummy_stat;
unsigned int nr_reclaimed; unsigned int nr_reclaimed;
...@@ -2098,7 +2099,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list, ...@@ -2098,7 +2099,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
.no_demotion = 1, .no_demotion = 1,
}; };
nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false); nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, ignore_references);
while (!list_empty(folio_list)) { while (!list_empty(folio_list)) {
folio = lru_to_folio(folio_list); folio = lru_to_folio(folio_list);
list_del(&folio->lru); list_del(&folio->lru);
...@@ -2108,7 +2109,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list, ...@@ -2108,7 +2109,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
return nr_reclaimed; return nr_reclaimed;
} }
unsigned long reclaim_pages(struct list_head *folio_list) unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references)
{ {
int nid; int nid;
unsigned int nr_reclaimed = 0; unsigned int nr_reclaimed = 0;
...@@ -2130,11 +2131,12 @@ unsigned long reclaim_pages(struct list_head *folio_list) ...@@ -2130,11 +2131,12 @@ unsigned long reclaim_pages(struct list_head *folio_list)
continue; continue;
} }
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid),
ignore_references);
nid = folio_nid(lru_to_folio(folio_list)); nid = folio_nid(lru_to_folio(folio_list));
} while (!list_empty(folio_list)); } while (!list_empty(folio_list));
nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), ignore_references);
memalloc_noreclaim_restore(noreclaim_flag); memalloc_noreclaim_restore(noreclaim_flag);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment