Commit 28073b02 authored by Hillf Danton, committed by Linus Torvalds

mm: hugetlb: defer freeing pages when gathering surplus pages

When gathering surplus pages, the number of needed pages is recomputed
after reacquiring the hugetlb lock, in order to catch changes in
resv_huge_pages and free_huge_pages; the recomputation also takes the
newly allocated pages into account.

Freeing the pages allocated so far can therefore be deferred until after
that recomputation: even if fewer pages than requested were allocated,
the demand may have shrunk in the meantime and the final request may
still be satisfied.
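
To make the control-flow change concrete, here is a minimal user-space
sketch of the deferred-free pattern. This is not the kernel code itself:
try_alloc(), recount_needed(), fake_demand, and all the numbers are
hypothetical stand-ins for alloc_buddy_huge_page() and the counters that
are re-read under hugetlb_lock.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static size_t fake_demand = 4;		/* pretend demand shrinks later */

/* Stand-in allocator: succeeds three times, then fails. */
static void *try_alloc(size_t got)
{
	return got < 3 ? malloc(64) : NULL;
}

/* Stand-in for re-reading the counters under the lock. */
static size_t recount_needed(size_t got)
{
	fake_demand = 2;		/* demand dropped meanwhile */
	return fake_demand > got ? fake_demand - got : 0;
}

/* Defer-free pattern: on allocation failure, keep what we have and
 * re-check demand first; free only if the request is still unmet. */
static bool gather(size_t needed, void **out, size_t *got)
{
	bool alloc_ok = true;

retry:
	for (size_t i = 0; i < needed; i++) {
		void *p = try_alloc(*got);
		if (!p) {
			alloc_ok = false;	/* remember the failure...  */
			break;			/* ...but free nothing yet  */
		}
		out[(*got)++] = p;
	}

	needed = recount_needed(*got);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;	/* allocator still working: try again */
		while (*got)		/* truly short: give it all back now  */
			free(out[--(*got)]);
		return false;
	}
	return true;
}

int main(void)
{
	void *pages[8];
	size_t got = 0;

	printf("satisfied: %s, kept %zu pages\n",
	       gather(fake_demand, pages, &got) ? "yes" : "no", got);
	while (got)
		free(pages[--got]);
	return 0;
}

In this toy run the allocator fails after three pages while the
recomputed demand has dropped to two, so the request succeeds; the
pre-patch behaviour of freeing everything at the first allocation
failure would have failed it needlessly.
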
Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cc715d99
@@ -852,6 +852,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	struct page *page, *tmp;
 	int ret, i;
 	int needed, allocated;
+	bool alloc_ok = true;
 
 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
 	if (needed <= 0) {
@@ -867,17 +868,13 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	spin_unlock(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
-		if (!page)
-			/*
-			 * We were not able to allocate enough pages to
-			 * satisfy the entire reservation so we free what
-			 * we've allocated so far.
-			 */
-			goto free;
-
+		if (!page) {
+			alloc_ok = false;
+			break;
+		}
 		list_add(&page->lru, &surplus_list);
 	}
-	allocated += needed;
+	allocated += i;
 
 	/*
 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
@@ -886,9 +883,16 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	spin_lock(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) -
 			(h->free_huge_pages + allocated);
-	if (needed > 0)
-		goto retry;
-
+	if (needed > 0) {
+		if (alloc_ok)
+			goto retry;
+		/*
+		 * We were not able to allocate enough pages to
+		 * satisfy the entire reservation so we free what
+		 * we've allocated so far.
+		 */
+		goto free;
+	}
 	/*
 	 * The surplus_list now contains _at_least_ the number of extra pages
 	 * needed to accommodate the reservation.  Add the appropriate number
@@ -914,10 +918,10 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 		VM_BUG_ON(page_count(page));
 		enqueue_huge_page(h, page);
 	}
+free:
 	spin_unlock(&hugetlb_lock);
 
 	/* Free unnecessary surplus pages to the buddy allocator */
-free:
 	if (!list_empty(&surplus_list)) {
 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 			list_del(&page->lru);
...