Commit 43dc2a0f authored by Juergen Gross

xen: move max_pfn in xen_memory_setup() out of function scope

Instead of having max_pfn as a local variable of xen_memory_setup(),
make it a static variable in setup.c instead. This avoids having to
pass it to subfunctions, which will be needed in more cases in future.

Rename it to ini_nr_pages, as the value denotes the currently usable
number of memory pages as passed from the hypervisor at boot time.
Signed-off-by: Juergen Gross <jgross@suse.com>
Tested-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent c4498ae3
...@@ -46,6 +46,9 @@ bool xen_pv_pci_possible; ...@@ -46,6 +46,9 @@ bool xen_pv_pci_possible;
/* E820 map used during setting up memory. */ /* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata; static struct e820_table xen_e820_table __initdata;
/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;
/* /*
* Buffer used to remap identity mapped pages. We only need the virtual space. * Buffer used to remap identity mapped pages. We only need the virtual space.
* The physical page behind this address is remapped as needed to different * The physical page behind this address is remapped as needed to different
...@@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn) ...@@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails. * as a fallback if the remapping fails.
*/ */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages) unsigned long end_pfn)
{ {
unsigned long pfn, end; unsigned long pfn, end;
int ret; int ret;
...@@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, ...@@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN_ON(start_pfn > end_pfn); WARN_ON(start_pfn > end_pfn);
/* Release pages first. */ /* Release pages first. */
end = min(end_pfn, nr_pages); end = min(end_pfn, ini_nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) { for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn); unsigned long mfn = pfn_to_mfn(pfn);
...@@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk( ...@@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
* to Xen and not remapped. * to Xen and not remapped.
*/ */
static unsigned long __init xen_set_identity_and_remap_chunk( static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
unsigned long remap_pfn)
{ {
unsigned long pfn; unsigned long pfn;
unsigned long i = 0; unsigned long i = 0;
unsigned long n = end_pfn - start_pfn; unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0) if (remap_pfn == 0)
remap_pfn = nr_pages; remap_pfn = ini_nr_pages;
while (i < n) { while (i < n) {
unsigned long cur_pfn = start_pfn + i; unsigned long cur_pfn = start_pfn + i;
...@@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long remap_range_size; unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */ /* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) { if (cur_pfn >= ini_nr_pages) {
/* Identity map remaining pages */ /* Identity map remaining pages */
set_phys_range_identity(cur_pfn, cur_pfn + size); set_phys_range_identity(cur_pfn, cur_pfn + size);
break; break;
} }
if (cur_pfn + size > nr_pages) if (cur_pfn + size > ini_nr_pages)
size = nr_pages - cur_pfn; size = ini_nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn); remap_range_size = xen_find_pfn_range(&remap_pfn);
if (!remap_range_size) { if (!remap_range_size) {
pr_warn("Unable to find available pfn range, not remapping identity pages\n"); pr_warn("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn, xen_set_identity_and_release_chunk(cur_pfn,
cur_pfn + left, nr_pages); cur_pfn + left);
break; break;
} }
/* Adjust size to fit in current e820 RAM region */ /* Adjust size to fit in current e820 RAM region */
...@@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
} }
static unsigned long __init xen_count_remap_pages( static unsigned long __init xen_count_remap_pages(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long start_pfn, unsigned long end_pfn,
unsigned long remap_pages) unsigned long remap_pages)
{ {
if (start_pfn >= nr_pages) if (start_pfn >= ini_nr_pages)
return remap_pages; return remap_pages;
return remap_pages + min(end_pfn, nr_pages) - start_pfn; return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
} }
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, static unsigned long __init xen_foreach_remap_area(
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
unsigned long nr_pages, unsigned long last_val)) unsigned long last_val))
{ {
phys_addr_t start = 0; phys_addr_t start = 0;
unsigned long ret_val = 0; unsigned long ret_val = 0;
...@@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, ...@@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
end_pfn = PFN_UP(entry->addr); end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) if (start_pfn < end_pfn)
ret_val = func(start_pfn, end_pfn, nr_pages, ret_val = func(start_pfn, end_pfn, ret_val);
ret_val);
start = end; start = end;
} }
} }
...@@ -700,7 +701,7 @@ static void __init xen_reserve_xen_mfnlist(void) ...@@ -700,7 +701,7 @@ static void __init xen_reserve_xen_mfnlist(void)
**/ **/
char * __init xen_memory_setup(void) char * __init xen_memory_setup(void)
{ {
unsigned long max_pfn, pfn_s, n_pfns; unsigned long pfn_s, n_pfns;
phys_addr_t mem_end, addr, size, chunk_size; phys_addr_t mem_end, addr, size, chunk_size;
u32 type; u32 type;
int rc; int rc;
...@@ -712,9 +713,8 @@ char * __init xen_memory_setup(void) ...@@ -712,9 +713,8 @@ char * __init xen_memory_setup(void)
int op; int op;
xen_parse_512gb(); xen_parse_512gb();
max_pfn = xen_get_pages_limit(); ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
max_pfn = min(max_pfn, xen_start_info->nr_pages); mem_end = PFN_PHYS(ini_nr_pages);
mem_end = PFN_PHYS(max_pfn);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
...@@ -789,10 +789,10 @@ char * __init xen_memory_setup(void) ...@@ -789,10 +789,10 @@ char * __init xen_memory_setup(void)
max_pages = xen_get_max_pages(); max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */ /* How many extra pages do we need due to remapping? */
max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); max_pages += xen_foreach_remap_area(xen_count_remap_pages);
if (max_pages > max_pfn) if (max_pages > ini_nr_pages)
extra_pages += max_pages - max_pfn; extra_pages += max_pages - ini_nr_pages;
/* /*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
...@@ -801,8 +801,8 @@ char * __init xen_memory_setup(void) ...@@ -801,8 +801,8 @@ char * __init xen_memory_setup(void)
* Make sure we have no memory above max_pages, as this area * Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management. * isn't handled by the p2m management.
*/ */
maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)); maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn); extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
i = 0; i = 0;
addr = xen_e820_table.entries[0].addr; addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size; size = xen_e820_table.entries[0].size;
...@@ -885,7 +885,7 @@ char * __init xen_memory_setup(void) ...@@ -885,7 +885,7 @@ char * __init xen_memory_setup(void)
* Set identity map on non-RAM pages and prepare remapping the * Set identity map on non-RAM pages and prepare remapping the
* underlying RAM. * underlying RAM.
*/ */
xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages); pr_info("Released %ld page(s)\n", xen_released_pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment