Commit 02c6de8d authored by Minchan Kim, committed by Linus Torvalds

mm: cma: discard clean pages during contiguous allocation instead of migration

Drop clean cache pages instead of migrating them during alloc_contig_range(), to
minimise allocation latency by reducing the amount of migration that is
necessary.  This is useful for CMA because allocation latency matters more
than preserving the working set of background processes.  In addition,
because reclaimed pages do not need migration target pages, fewer free pages
are required, so we avoid reclaiming memory just to obtain free pages, which
is itself a contributory factor to increased latency.

I measured the elapsed time of __alloc_contig_migrate_range(), which migrates
10M within a 40M movable zone, on a QEMU machine.

Before - 146ms, After - 7ms
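
To make the intent concrete outside the kernel, here is a small illustrative sketch in plain C (not part of this commit; the fake_page struct, its field names and the sample values are invented).  It partitions an isolated page list the same way reclaim_clean_pages_from_list() below does: clean file-backed pages are discarded rather than migrated, so only the remainder needs free pages as migration targets.

/*
 * Toy userspace illustration (not kernel code).  It applies the same test
 * the patch uses, "file-backed && !dirty": such pages can simply be dropped
 * because their contents can be re-read from backing storage, so only the
 * remaining pages still need a migration target.
 */
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool file_backed;	/* page-cache page rather than anonymous */
	bool dirty;		/* would need writeback before discard */
};

int main(void)
{
	struct fake_page range[] = {
		{ .file_backed = true,  .dirty = false },	/* droppable       */
		{ .file_backed = true,  .dirty = true  },	/* needs migration */
		{ .file_backed = false, .dirty = false },	/* anon: migration */
		{ .file_backed = true,  .dirty = false },	/* droppable       */
	};
	unsigned int dropped = 0, to_migrate = 0;

	for (size_t i = 0; i < sizeof(range) / sizeof(range[0]); i++) {
		/* mirrors page_is_file_cache(page) && !PageDirty(page) */
		if (range[i].file_backed && !range[i].dirty)
			dropped++;	/* discard the clean copy, no migration */
		else
			to_migrate++;	/* still needs a free target page */
	}

	printf("dropped %u clean file pages, %u left to migrate\n",
	       dropped, to_migrate);
	return 0;
}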

[akpm@linux-foundation.org: fix nommu build]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Mel Gorman <mgorman@suse.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Rik van Riel <riel@redhat.com>
Tested-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 70400303
include/linux/rmap.h
@@ -71,6 +71,17 @@ struct anon_vma_chain {
 #endif
 };
 
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
 #ifdef CONFIG_MMU
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
@@ -164,16 +175,6 @@ int page_referenced(struct page *, int is_locked,
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
-enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
-
-	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
-	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
-	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
-
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
mm/internal.h
@@ -356,5 +356,6 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 	unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
 
 #endif	/* __MM_INTERNAL_H */
mm/page_alloc.c
@@ -5700,6 +5700,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 			break;
 		}
 
+		reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+
 		ret = migrate_pages(&cc.migratepages,
 				    __alloc_contig_migrate_alloc,
 				    0, false, MIGRATE_SYNC);
mm/vmscan.c
@@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
+				      enum ttu_flags ttu_flags,
 				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_writeback)
+				      unsigned long *ret_nr_writeback,
+				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	mem_cgroup_uncharge_start();
 	while (!list_empty(page_list)) {
-		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
+		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 
 		cond_resched();
@@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			wait_on_page_writeback(page);
 		}
 
-		references = page_check_references(page, sc);
+		if (!force_reclaim)
+			references = page_check_references(page, sc);
+
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, TTU_UNMAP)) {
+			switch (try_to_unmap(page, ttu_flags)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -960,6 +964,33 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	return nr_reclaimed;
 }
 
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_unmap = 1,
+	};
+	unsigned long ret, dummy1, dummy2;
+	struct page *page, *next;
+	LIST_HEAD(clean_pages);
+
+	list_for_each_entry_safe(page, next, page_list, lru) {
+		if (page_is_file_cache(page) && !PageDirty(page)) {
+			ClearPageActive(page);
+			list_move(&page->lru, &clean_pages);
+		}
+	}
+
+	ret = shrink_page_list(&clean_pages, zone, &sc,
+				TTU_UNMAP|TTU_IGNORE_ACCESS,
+				&dummy1, &dummy2, true);
+	list_splice(&clean_pages, page_list);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	return ret;
+}
+
 /*
  * Attempt to remove the specified page from its LRU.  Only take this page
  * if it is of the appropriate PageActive status.  Pages which are being
@@ -1278,8 +1309,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (nr_taken == 0)
 		return 0;
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
-					&nr_dirty, &nr_writeback);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+					&nr_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);