Commit 45676885 authored by Shaohua Li, committed by Linus Torvalds

thp: improve order in lru list for split huge page

When an isolated hugepage is being split, put its tail subpages at the reclaim
end of the LRU list, since they should presumably be isolated next as well.

For non-isolated hugepages under splitting, queue the subpages in physical
order in the LRU.  That might provide some theoretical cache benefit to the
buddy allocator later.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f21760b1
mm/huge_memory.c

@@ -1228,7 +1228,6 @@ static int __split_huge_page_splitting(struct page *page,
 static void __split_huge_page_refcount(struct page *page)
 {
 	int i;
-	unsigned long head_index = page->index;
 	struct zone *zone = page_zone(page);
 	int zonestat;
 	int tail_count = 0;
@@ -1239,7 +1238,7 @@ static void __split_huge_page_refcount(struct page *page)
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
 
-	for (i = 1; i < HPAGE_PMD_NR; i++) {
+	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
 		struct page *page_tail = page + i;
 
 		/* tail_page->_mapcount cannot change */
@@ -1302,7 +1301,7 @@ static void __split_huge_page_refcount(struct page *page)
 		BUG_ON(page_tail->mapping);
 		page_tail->mapping = page->mapping;
 
-		page_tail->index = ++head_index;
+		page_tail->index = page->index + i;
 
 		BUG_ON(!PageAnon(page_tail));
 		BUG_ON(!PageUptodate(page_tail));

mm/swap.c

@@ -681,7 +681,7 @@ void lru_add_page_tail(struct zone* zone,
 		if (likely(PageLRU(page)))
 			list_add(&page_tail->lru, page->lru.prev);
 		else
-			list_add(&page_tail->lru, &lruvec->lists[lru]);
+			list_add(&page_tail->lru, lruvec->lists[lru].prev);
 		__mod_zone_page_state(zone, NR_LRU_BASE + lru,
 				      hpage_nr_pages(page_tail));
 	} else {
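As a side note for readers who want to see the resulting list order outside the
kernel tree, the effect of the reversed loop combined with the insertion points
used in lru_add_page_tail() can be reproduced with a small userspace sketch.
Everything below is illustrative only: the toy list_head/list_add merely copy
the kernel's "insert right after this node" semantics, and HPAGE_PMD_NR, the
pfn field and the printout are stand-ins made up for this demo, not kernel code.

/* toy_lru_order.c -- illustrative sketch only, not kernel code */
#include <stddef.h>
#include <stdio.h>

#define HPAGE_PMD_NR 8			/* pretend a huge page has 8 subpages */

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

/* Same semantics as the kernel's list_add(): new sits right after node. */
static void list_add(struct list_head *new, struct list_head *node)
{
	new->prev = node;
	new->next = node->next;
	node->next->prev = new;
	node->next = new;
}

struct page {
	int pfn;			/* stand-in for the physical position */
	struct list_head lru;
};

/* container_of()-style helper for this demo */
#define lru_entry(p) ((struct page *)((char *)(p) - offsetof(struct page, lru)))

int main(void)
{
	struct list_head lru_list;	/* stand-in for one LRU list */
	struct page head = { .pfn = 0 };
	struct page tail[HPAGE_PMD_NR];
	struct list_head *pos;
	int i;

	INIT_LIST_HEAD(&lru_list);
	list_add(&head.lru, &lru_list);	/* the head page is already on the LRU */

	/* Same order as the patched loop: i = HPAGE_PMD_NR - 1 down to 1. */
	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		tail[i].pfn = i;
		/*
		 * Non-isolated head: insert just before the head page.
		 * The isolated-head branch would instead insert after
		 * lru_list.prev, queueing the tails at the end of the
		 * list that reclaim isolates from.
		 */
		list_add(&tail[i].lru, head.lru.prev);
	}

	/*
	 * Reclaim takes pages from the prev end of the list, so walk in
	 * that direction: the pages come out in physical order.
	 */
	for (pos = lru_list.prev; pos != &lru_list; pos = pos->prev)
		printf("%d ", lru_entry(pos)->pfn);
	printf("\n");		/* prints: 0 1 2 3 4 5 6 7 */
	return 0;
}

Walking from the prev end of the list prints 0 1 2 3 4 5 6 7, i.e. the head
page followed by its former tails in ascending physical order, which is the
second point of the commit message.  In the isolated-head case the patched
else branch inserts after lruvec->lists[lru].prev instead, so the tails land
at the end reclaim will isolate from next, which is the first point.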