Commit ef0f8f11 authored by Xavier Deguillard, committed by Greg Kroah-Hartman

VMware balloon: partially inline vmballoon_reserve_page.

This splits the function in two: the allocation part is inlined into the
inflate function, and the lock part is kept in its own function.

This change is needed to allow allocating more than one page before
making the hypervisor call.
Signed-off-by: Xavier Deguillard <xdeguillard@vmware.com>
Acked-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Philip P. Moltmann <moltmann@vmware.com>
Acked-by: Andy King <acking@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d719b76c
@@ -46,7 +46,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.3-k");
+MODULE_VERSION("1.2.2.0-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -402,35 +402,14 @@ static void vmballoon_reset(struct vmballoon *b)
 }
 
 /*
- * Allocate (or reserve) a page for the balloon and notify the host. If host
- * refuses the page put it on "refuse" list and allocate another one until host
- * is satisfied. "Refused" pages are released at the end of inflation cycle
- * (when we allocate b->rate_alloc pages).
+ * Notify the host of a ballooned page. If host rejects the page put it on the
+ * refuse list, those refused page are then released at the end of the
+ * inflation cycle.
  */
-static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
 {
-	struct page *page;
-	gfp_t flags;
-	unsigned int hv_status;
-	int locked;
-	flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
-
-	do {
-		if (!can_sleep)
-			STATS_INC(b->stats.alloc);
-		else
-			STATS_INC(b->stats.sleep_alloc);
-
-		page = alloc_page(flags);
-		if (!page) {
-			if (!can_sleep)
-				STATS_INC(b->stats.alloc_fail);
-			else
-				STATS_INC(b->stats.sleep_alloc_fail);
-			return -ENOMEM;
-		}
+	int locked, hv_status;
 
-		/* inform monitor */
-		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
-		if (locked > 0) {
-			STATS_INC(b->stats.refused_alloc);
+	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
+	if (locked > 0) {
+		STATS_INC(b->stats.refused_alloc);
@@ -446,11 +425,14 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
 		 * and retry allocation, unless we already accumulated
 		 * too many of them, in which case take a breather.
 		 */
-		list_add(&page->lru, &b->refused_pages);
-		if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
-			return -EIO;
+		if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
+			b->n_refused_pages++;
+			list_add(&page->lru, &b->refused_pages);
+		} else {
+			__free_page(page);
+		}
+		return -EIO;
 	}
-	} while (locked != 0);
 
 	/* track allocated page */
 	list_add(&page->lru, &b->pages);
@@ -512,7 +494,7 @@ static void vmballoon_inflate(struct vmballoon *b)
 	unsigned int i;
 	unsigned int allocations = 0;
 	int error = 0;
-	bool alloc_can_sleep = false;
+	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
 
 	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
 
@@ -543,19 +525,16 @@ static void vmballoon_inflate(struct vmballoon *b)
 		 __func__, goal, rate, b->rate_alloc);
 
 	for (i = 0; i < goal; i++) {
+		struct page *page;
 
-		error = vmballoon_reserve_page(b, alloc_can_sleep);
-		if (error) {
-			if (error != -ENOMEM) {
-				/*
-				 * Not a page allocation failure, stop this
-				 * cycle. Maybe we'll get new target from
-				 * the host soon.
-				 */
-				break;
-			}
+		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
+			STATS_INC(b->stats.alloc);
+		else
+			STATS_INC(b->stats.sleep_alloc);
 
-			if (alloc_can_sleep) {
+		page = alloc_page(flags);
+		if (!page) {
+			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
 				/*
 				 * CANSLEEP page allocation failed, so guest
 				 * is under severe memory pressure. Quickly
@@ -563,8 +542,10 @@ static void vmballoon_inflate(struct vmballoon *b)
 				 */
 				b->rate_alloc = max(b->rate_alloc / 2,
 						    VMW_BALLOON_RATE_ALLOC_MIN);
+				STATS_INC(b->stats.sleep_alloc_fail);
 				break;
 			}
+			STATS_INC(b->stats.alloc_fail);
 
 			/*
 			 * NOSLEEP page allocation failed, so the guest is
@@ -579,11 +560,16 @@ static void vmballoon_inflate(struct vmballoon *b)
 			if (i >= b->rate_alloc)
 				break;
 
-			alloc_can_sleep = true;
+			flags = VMW_PAGE_ALLOC_CANSLEEP;
 			/* Lower rate for sleeping allocations. */
 			rate = b->rate_alloc;
+			continue;
 		}
 
+		error = vmballoon_lock_page(b, page);
+		if (error)
+			break;
+
 		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
 			cond_resched();
 			allocations = 0;
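For readers skimming the diff, here is a condensed user-space sketch of the control flow this patch creates. It is illustrative only: `malloc`/`free` and a randomized stub stand in for `alloc_page()`, `__free_page()` and `vmballoon_send_lock_page()`; the toy `struct balloon` with a hand-rolled singly-linked list replaces the driver's `struct vmballoon` and `list_add()`; and the stats counters, rate limiting, and hv_status handling are omitted. The names `lock_page`, `send_lock_page`, `MAX_REFUSED`, and the 1-in-8 refusal rate are made up for the example.

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define MAX_REFUSED 16	/* stands in for VMW_BALLOON_MAX_REFUSED */

struct page {
	struct page *next;
};

struct balloon {
	struct page *pages;		/* pages the host accepted */
	struct page *refused_pages;	/* pages the host rejected this cycle */
	unsigned int n_refused_pages;
};

/* Fake hypervisor call standing in for vmballoon_send_lock_page().
 * Returns > 0 when the "host" refuses the page. */
static int send_lock_page(struct page *p)
{
	(void)p;
	return (rand() % 8 == 0) ? 1 : 0;
}

/* Shape of the new vmballoon_lock_page(): it no longer allocates; it only
 * notifies the host, and files refused pages for release at cycle end. */
static int lock_page(struct balloon *b, struct page *page)
{
	if (send_lock_page(page) > 0) {
		if (b->n_refused_pages < MAX_REFUSED) {
			b->n_refused_pages++;
			page->next = b->refused_pages;
			b->refused_pages = page;
		} else {
			free(page);	/* __free_page() in the driver */
		}
		return -1;		/* -EIO in the driver */
	}
	page->next = b->pages;
	b->pages = page;
	return 0;
}

int main(void)
{
	struct balloon b = { 0 };
	bool can_sleep = false;	/* models the NOSLEEP -> CANSLEEP switch */
	unsigned int accepted = 0;

	/* The allocation half of the old vmballoon_reserve_page() now
	 * lives directly in the inflate loop, so a later change can
	 * allocate several pages before one hypervisor call. */
	for (int i = 0; i < 64; i++) {
		struct page *page = malloc(sizeof(*page)); /* alloc_page() */
		if (!page) {
			if (can_sleep)
				break;	/* severe pressure: stop the cycle */
			can_sleep = true;	/* retry as sleeping alloc */
			continue;
		}
		if (lock_page(&b, page))
			break;	/* host refused the page: stop the cycle */
		accepted++;
	}

	printf("accepted %u page(s), %u refused queued\n",
	       accepted, b.n_refused_pages);
	return 0;
}
```

The point of the split is visible in `main()`: allocation is now a plain loop-local step decoupled from the host notification, which is what lets the follow-up work batch multiple allocated pages into a single hypervisor call.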