Commit dc67647b authored by Joonsoo Kim, committed by Linus Torvalds

mm/cma: change fallback behaviour for CMA freepage

Freepages with MIGRATE_CMA can be used only for MIGRATE_MOVABLE
allocations, and they should not be expanded onto the buddy lists of
other migratetypes, in order to protect them from unmovable/reclaimable
allocations.  Implementing these requirements in __rmqueue_fallback(),
that is, in the code that finds the largest possible block of
freepages, has the bad effect that high-order MIGRATE_CMA freepages are
broken up continually even though CMA freepages of a suitable order are
available.  The reason is that they are not expanded onto the buddy
lists of other migratetypes, so the next __rmqueue_fallback()
invocation finds another largest block of freepages and breaks it up
again.  So, the MIGRATE_CMA fallback should be handled separately.
This patch introduces __rmqueue_cma_fallback(), which is just a wrapper
around __rmqueue_smallest(), and calls it before __rmqueue_fallback()
if migratetype == MIGRATE_MOVABLE.
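
As an illustration, after this patch a movable request tries the
requested migratetype first, then CMA, and only then the generic
fallback.  The sketch below is a simplified version of the resulting
__rmqueue() ordering (the MIGRATE_RESERVE retry and tracing are
omitted); the full change is in the diff below.

	/* Simplified sketch of the new ordering in __rmqueue() */
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		/*
		 * Movable requests try CMA freepages before stealing
		 * from the buddy lists of other migratetypes.
		 */
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page)
			page = __rmqueue_fallback(zone, order, migratetype);
	}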

This results in an unintended behaviour change: MIGRATE_CMA freepages
are now always used first, before any other migratetype, as the
fallback for movable allocations.  But, as already mentioned above,
MIGRATE_CMA can be used only for MIGRATE_MOVABLE, so it is better to
use MIGRATE_CMA freepages first as much as possible.  Otherwise, we
needlessly take up precious freepages of other migratetypes and
increase the chance of fragmentation.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 30467e0b
@@ -1032,11 +1032,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #ifdef CONFIG_CMA
-	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
-#else
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -1044,6 +1042,17 @@ static int fallbacks[MIGRATE_TYPES][4] = {
 #endif
 };
 
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order)
+{
+	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
+}
+#else
+static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order) { return NULL; }
+#endif
+
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock
@@ -1195,19 +1204,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 					struct page, lru);
 		area->nr_free--;
 
-		if (!is_migrate_cma(migratetype)) {
-			try_to_steal_freepages(zone, page,
-						start_migratetype,
-						migratetype);
-		} else {
-			/*
-			 * When borrowing from MIGRATE_CMA, we need to
-			 * release the excess buddy pages to CMA
-			 * itself, and we do not try to steal extra
-			 * free pages.
-			 */
-			buddy_type = migratetype;
-		}
+		try_to_steal_freepages(zone, page, start_migratetype,
+							migratetype);
 
 		/* Remove the page from the freelists */
 		list_del(&page->lru);
@@ -1249,6 +1247,10 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 	page = __rmqueue_smallest(zone, order, migratetype);
 
 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
+		if (migratetype == MIGRATE_MOVABLE)
+			page = __rmqueue_cma_fallback(zone, order);
+
+		if (!page)
 		page = __rmqueue_fallback(zone, order, migratetype);
 
 		/*