Commit ce990f1d authored by Linus Torvalds

Merge tag 'for-linus-5.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - a fix for the Xen gntdev driver

 - a fix for running as Xen dom0 booted via EFI with the EFI framebuffer
   located above 4GB

 - a series adding support for mapping other guests' memory by using
   ZONE_DEVICE when running as a Xen guest on Arm (the core mechanism is
   sketched after the shortlog below)

* tag 'for-linus-5.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  dt-bindings: xen: Clarify "reg" purpose
  arm/xen: Read extended regions from DT and init Xen resource
  xen/unpopulated-alloc: Add mechanism to use Xen resource
  xen/balloon: Bring alloc(free)_xenballooned_pages helpers back
  arm/xen: Switch to use gnttab_setup_auto_xlat_frames() for DT
  xen/unpopulated-alloc: Drop check for virt_addr_valid() in fill_list()
  xen/x86: obtain upper 32 bits of video frame buffer address for Dom0
  xen/gntdev: fix unmap notification order
parents 64ad9461 54bb4a91
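
The core mechanism of the Arm series below is ZONE_DEVICE onboarding: instead of ballooning RAM pages out to make room for foreign mappings, an unused range of guest physical address space is turned into struct pages via memremap_pages(). A minimal sketch in the spirit of fill_list() in drivers/xen/unpopulated-alloc.c (an illustration, not the patch itself; error unwinding and the extra resource bookkeeping of the real code are omitted):

/* Sketch: turn unused guest physical address space into struct pages
 * usable for grant/foreign mappings, via ZONE_DEVICE. */
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int zone_device_sketch(struct resource *target, unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!res || !pgmap)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* Carve a section-aligned hole out of the target resource; on Arm
	 * this can be the "extended regions" advertised by the hypervisor. */
	if (allocate_resource(target, res, nr_pages * PAGE_SIZE, 0, -1,
			      PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL))
		return -ENOMEM;

	/* Create struct pages for the range; they are backed by no RAM and
	 * exist purely so grants/foreign GFNs can be mapped through them. */
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	return IS_ERR(vaddr) ? PTR_ERR(vaddr) : 0;
}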
Documentation/devicetree/bindings/arm/xen.txt
@@ -7,15 +7,17 @@ the following properties:
 	compatible = "xen,xen-<version>", "xen,xen";
   where <version> is the version of the Xen ABI of the platform.

-- reg: specifies the base physical address and size of a region in
-  memory where the grant table should be mapped to, using an
-  HYPERVISOR_memory_op hypercall. The memory region is large enough to map
-  the whole grant table (it is larger or equal to gnttab_max_grant_frames()).
-  This property is unnecessary when booting Dom0 using ACPI.
+- reg: specifies the base physical address and size of the regions in memory
+  where the special resources should be mapped to, using an HYPERVISOR_memory_op
+  hypercall.
+  Region 0 is reserved for mapping grant table, it must be always present.
+  The memory region is large enough to map the whole grant table (it is larger
+  or equal to gnttab_max_grant_frames()).
+  Regions 1...N are extended regions (unused address space) for mapping foreign
+  GFNs and grants, they might be absent if there is nothing to expose.

 - interrupts: the interrupt used by Xen to inject event notifications.
   A GIC node is also required.
+  This property is unnecessary when booting Dom0 using ACPI.

 To support UEFI on Xen ARM virtual platforms, Xen populates the FDT "uefi" node
 under /hypervisor with following parameters:
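
For context, a hedged sketch of how a kernel could walk the "reg" entries this binding now describes; the index constants mirror GRANT_TABLE_INDEX/EXT_REGION_INDEX introduced by the Arm code later in this merge (illustrative only, not part of the patch):

/* Illustrative only: region 0 is the grant table area, regions 1..N are
 * the optional extended regions. */
#include <linux/of.h>
#include <linux/of_address.h>

#define GRANT_TABLE_INDEX	0
#define EXT_REGION_INDEX	1

static void __init xen_reg_walk_sketch(void)
{
	struct device_node *np;
	struct resource res;
	unsigned int nr_ext = 0;

	np = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!np)
		return;

	/* Region 0: grant table space, must always be present. */
	if (!of_address_to_resource(np, GRANT_TABLE_INDEX, &res))
		pr_info("grant table space: %pR\n", &res);

	/* Regions 1...N: extended regions, may be absent. */
	while (of_get_address(np, EXT_REGION_INDEX + nr_ext, NULL, NULL))
		nr_ext++;

	pr_info("%u extended region(s) for foreign mappings\n", nr_ext);
}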
arch/arm/xen/enlighten.c
@@ -59,6 +59,10 @@ unsigned long xen_released_pages;
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

 static __read_mostly unsigned int xen_events_irq;
+static __read_mostly phys_addr_t xen_grant_frames;
+
+#define GRANT_TABLE_INDEX	0
+#define EXT_REGION_INDEX	1

 uint32_t xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
@@ -300,9 +304,115 @@ static void __init xen_acpi_guest_init(void)
 #endif
 }

+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+/*
+ * A type-less specific Xen resource which contains extended regions
+ * (unused regions of guest physical address space provided by the hypervisor).
+ */
+static struct resource xen_resource = {
+	.name = "Xen unused space",
+};
+
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+	struct device_node *np;
+	struct resource *regs, *tmp_res;
+	uint64_t min_gpaddr = -1, max_gpaddr = 0;
+	unsigned int i, nr_reg = 0;
+	int rc;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (!acpi_disabled)
+		return -ENODEV;
+
+	np = of_find_compatible_node(NULL, NULL, "xen,xen");
+	if (WARN_ON(!np))
+		return -ENODEV;
+
+	/* Skip region 0 which is reserved for grant table space */
+	while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
+		nr_reg++;
+
+	if (!nr_reg) {
+		pr_err("No extended regions are found\n");
+		return -EINVAL;
+	}
+
+	regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	/*
+	 * Create resource from extended regions provided by the hypervisor to be
+	 * used as unused address space for Xen scratch pages.
+	 */
+	for (i = 0; i < nr_reg; i++) {
+		rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]);
+		if (rc)
+			goto err;
+
+		if (max_gpaddr < regs[i].end)
+			max_gpaddr = regs[i].end;
+		if (min_gpaddr > regs[i].start)
+			min_gpaddr = regs[i].start;
+	}
+
+	xen_resource.start = min_gpaddr;
+	xen_resource.end = max_gpaddr;
+
+	/*
+	 * Mark holes between extended regions as unavailable. The rest of that
+	 * address space will be available for the allocation.
+	 */
+	for (i = 1; i < nr_reg; i++) {
+		resource_size_t start, end;
+
+		/* There is an overlap between regions */
+		if (regs[i - 1].end + 1 > regs[i].start) {
+			rc = -EINVAL;
+			goto err;
+		}

+		/* There is no hole between regions */
+		if (regs[i - 1].end + 1 == regs[i].start)
+			continue;
+
+		start = regs[i - 1].end + 1;
+		end = regs[i].start - 1;
+
+		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+		if (!tmp_res) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		tmp_res->name = "Unavailable space";
+		tmp_res->start = start;
+		tmp_res->end = end;
+
+		rc = insert_resource(&xen_resource, tmp_res);
+		if (rc) {
+			pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
+			kfree(tmp_res);
+			goto err;
+		}
+	}
+
+	*res = &xen_resource;
+
+err:
+	kfree(regs);
+
+	return rc;
+}
+#endif
+
 static void __init xen_dt_guest_init(void)
 {
 	struct device_node *xen_node;
+	struct resource res;

 	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!xen_node) {
@@ -311,13 +421,19 @@ static void __init xen_dt_guest_init(void)
 	}

 	xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+
+	if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
+		pr_err("Xen grant table region is not found\n");
+		return;
+	}
+	xen_grant_frames = res.start;
 }

 static int __init xen_guest_init(void)
 {
 	struct xen_add_to_physmap xatp;
 	struct shared_info *shared_info_page = NULL;
-	int cpu;
+	int rc, cpu;

 	if (!xen_domain())
 		return 0;
@@ -370,12 +486,16 @@ static int __init xen_guest_init(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(xen_vcpu_id, cpu) = cpu;

-	xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
-	if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
-					  &xen_auto_xlat_grant_frames.vaddr,
-					  xen_auto_xlat_grant_frames.count)) {
+	if (!xen_grant_frames) {
+		xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
+		rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
+						   &xen_auto_xlat_grant_frames.vaddr,
+						   xen_auto_xlat_grant_frames.count);
+	} else
+		rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
+	if (rc) {
 		free_percpu(xen_vcpu_info);
-		return -ENOMEM;
+		return rc;
 	}
 	gnttab_init();
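
The bounding-plus-holes logic in arch_xen_unpopulated_init() above is easiest to see with numbers. A standalone user-space illustration with two hypothetical extended regions (the kernel code additionally assumes the DT regions are sorted by address, which is why a simple neighbour comparison can detect overlap):

/* Standalone sketch of the hole-marking logic, plain user-space C. */
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, end; };

int main(void)
{
	/* Hypothetical "reg" entries 1 and 2, sorted by address. */
	struct region regs[] = {
		{ 0x40000000ULL, 0x4fffffffULL },
		{ 0x60000000ULL, 0x6fffffffULL },
	};
	unsigned int i, nr_reg = sizeof(regs) / sizeof(regs[0]);
	uint64_t min_gpaddr = UINT64_MAX, max_gpaddr = 0;

	/* One bounding resource spans everything... */
	for (i = 0; i < nr_reg; i++) {
		if (max_gpaddr < regs[i].end)
			max_gpaddr = regs[i].end;
		if (min_gpaddr > regs[i].start)
			min_gpaddr = regs[i].start;
	}
	printf("Xen unused space : [%#llx-%#llx]\n",
	       (unsigned long long)min_gpaddr, (unsigned long long)max_gpaddr);

	/* ...and each gap between neighbours is blocked out, so only the
	 * real extended regions remain allocatable. */
	for (i = 1; i < nr_reg; i++) {
		if (regs[i - 1].end + 1 < regs[i].start)
			printf("  Unavailable space : [%#llx-%#llx]\n",
			       (unsigned long long)(regs[i - 1].end + 1),
			       (unsigned long long)(regs[i].start - 1));
	}
	return 0;
}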
arch/x86/xen/vga.c
@@ -62,14 +62,18 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
 			break;
 		}

+		if (size >= offsetof(struct dom0_vga_console_info,
+				     u.vesa_lfb.gbl_caps)
+			    + sizeof(info->u.vesa_lfb.gbl_caps))
+			screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
 		if (size >= offsetof(struct dom0_vga_console_info,
 				     u.vesa_lfb.mode_attrs)
 			    + sizeof(info->u.vesa_lfb.mode_attrs))
 			screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
+
+		if (size >= offsetof(struct dom0_vga_console_info,
+				     u.vesa_lfb.ext_lfb_base)
+			    + sizeof(info->u.vesa_lfb.ext_lfb_base)
+		    && info->u.vesa_lfb.ext_lfb_base) {
+			screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
+			screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+		}
 		break;
 	}
 }
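
On the consumer side, a driver reassembles the 64-bit framebuffer address from the two screen_info fields roughly like this (a sketch; EFI framebuffer consumers do the equivalent when VIDEO_CAPABILITY_64BIT_BASE is set):

/* Sketch: consumer of the split base address; "si" would be the global
 * screen_info that xen_init_vga() above fills in. */
#include <linux/screen_info.h>

static u64 lfb_base_of(const struct screen_info *si)
{
	u64 base = si->lfb_base;

	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		base |= (u64)si->ext_lfb_base << 32;

	return base;
}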
drivers/xen/Kconfig
@@ -327,7 +327,7 @@ config XEN_FRONT_PGDIR_SHBUF

 config XEN_UNPOPULATED_ALLOC
 	bool "Use unpopulated memory ranges for guest mappings"
-	depends on X86 && ZONE_DEVICE
+	depends on ZONE_DEVICE
 	default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
 	help
 	  Use unpopulated memory ranges in order to create mappings for guest
drivers/xen/balloon.c
@@ -581,7 +581,6 @@ void balloon_set_new_target(unsigned long target)
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);

-#ifndef CONFIG_XEN_UNPOPULATED_ALLOC
 static int add_ballooned_pages(unsigned int nr_pages)
 {
 	enum bp_state st;
@@ -610,12 +609,12 @@ static int add_ballooned_pages(unsigned int nr_pages)
 }

 /**
- * xen_alloc_unpopulated_pages - get pages that have been ballooned out
+ * xen_alloc_ballooned_pages - get pages that have been ballooned out
  * @nr_pages: Number of pages to get
  * @pages: pages returned
  * @return 0 on success, error otherwise
  */
-int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
 {
 	unsigned int pgno = 0;
 	struct page *page;
@@ -652,23 +651,23 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 	return 0;
 out_undo:
 	mutex_unlock(&balloon_mutex);
-	xen_free_unpopulated_pages(pgno, pages);
+	xen_free_ballooned_pages(pgno, pages);
 	/*
-	 * NB: free_xenballooned_pages will only subtract pgno pages, but since
+	 * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
 	 * target_unpopulated is incremented with nr_pages at the start we need
 	 * to remove the remaining ones also, or accounting will be screwed.
 	 */
 	balloon_stats.target_unpopulated -= nr_pages - pgno;
 	return ret;
 }
-EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+EXPORT_SYMBOL(xen_alloc_ballooned_pages);

 /**
- * xen_free_unpopulated_pages - return pages retrieved with get_ballooned_pages
+ * xen_free_ballooned_pages - return pages retrieved with get_ballooned_pages
  * @nr_pages: Number of pages
  * @pages: pages to return
  */
-void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
 {
 	unsigned int i;
@@ -687,9 +686,9 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)

 	mutex_unlock(&balloon_mutex);
 }
-EXPORT_SYMBOL(xen_free_unpopulated_pages);
+EXPORT_SYMBOL(xen_free_ballooned_pages);

-#if defined(CONFIG_XEN_PV)
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
 static void __init balloon_add_region(unsigned long start_pfn,
 				      unsigned long pages)
 {
@@ -712,7 +711,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	balloon_stats.total_pages += extra_pfn_end - start_pfn;
 }
 #endif
-#endif

 static int __init balloon_init(void)
 {
drivers/xen/gntdev.c
@@ -250,13 +250,13 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	if (!refcount_dec_and_test(&map->users))
 		return;

+	if (map->pages && !use_ptemod)
+		unmap_grant_pages(map, 0, map->count);
+
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
 	}

-	if (map->pages && !use_ptemod)
-		unmap_grant_pages(map, 0, map->count);
-
 	gntdev_free_map(map);
 }
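
For context on why the order matters: the notification tells the granting peer that the mapping is gone, so it must not fire until the pages are really unmapped, or the peer may reuse or free the granted page while it is still mapped here. User space arms such a notification through the gntdev ioctl; a hedged sketch (the fd, index, and event channel port are illustrative placeholders; the ioctl and struct come from the gntdev uapi header):

/* Sketch: user space arming an unmap notification on a gntdev mapping. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>	/* include/uapi/xen/gntdev.h */

static int arm_unmap_notify(int gntdev_fd, uint64_t map_index, uint32_t port)
{
	struct ioctl_gntdev_unmap_notify notify = {
		.index = map_index,	/* offset of the mapping, as set up
					 * via IOCTL_GNTDEV_MAP_GRANT_REF */
		.action = UNMAP_NOTIFY_SEND_EVENT,
		.event_channel_port = port,
	};

	/* With the reordering above, this event now fires only after the
	 * grant pages have actually been unmapped. */
	return ioctl(gntdev_fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
}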
drivers/xen/unpopulated-alloc.c
@@ -8,6 +8,7 @@

 #include <asm/page.h>

+#include <xen/balloon.h>
 #include <xen/page.h>
 #include <xen/xen.h>
@@ -15,13 +16,29 @@ static DEFINE_MUTEX(list_lock);
 static struct page *page_list;
 static unsigned int list_count;

+static struct resource *target_resource;
+
+/*
+ * If arch is not happy with system "iomem_resource" being used for
+ * the region allocation it can provide it's own view by creating specific
+ * Xen resource with unused regions of guest physical address space provided
+ * by the hypervisor.
+ */
+int __weak __init arch_xen_unpopulated_init(struct resource **res)
+{
+	*res = &iomem_resource;
+
+	return 0;
+}
+
 static int fill_list(unsigned int nr_pages)
 {
 	struct dev_pagemap *pgmap;
-	struct resource *res;
+	struct resource *res, *tmp_res = NULL;
 	void *vaddr;
 	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
-	int ret = -ENOMEM;
+	struct range mhp_range;
+	int ret;

 	res = kzalloc(sizeof(*res), GFP_KERNEL);
 	if (!res)
@@ -30,14 +47,40 @@ static int fill_list(unsigned int nr_pages)
 	res->name = "Xen scratch";
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

-	ret = allocate_resource(&iomem_resource, res,
-				alloc_pages * PAGE_SIZE, 0, -1,
+	mhp_range = mhp_get_pluggable_range(true);
+
+	ret = allocate_resource(target_resource, res,
+				alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
 				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
 	if (ret < 0) {
 		pr_err("Cannot allocate new IOMEM resource\n");
 		goto err_resource;
 	}

+	/*
+	 * Reserve the region previously allocated from Xen resource to avoid
+	 * re-using it by someone else.
+	 */
+	if (target_resource != &iomem_resource) {
+		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+		if (!tmp_res) {
+			ret = -ENOMEM;
+			goto err_insert;
+		}
+
+		tmp_res->name = res->name;
+		tmp_res->start = res->start;
+		tmp_res->end = res->end;
+		tmp_res->flags = res->flags;
+
+		ret = request_resource(&iomem_resource, tmp_res);
+		if (ret < 0) {
+			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
+			kfree(tmp_res);
+			goto err_insert;
+		}
+	}
+
 	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
 	if (!pgmap) {
 		ret = -ENOMEM;
@@ -85,7 +128,6 @@ static int fill_list(unsigned int nr_pages)
 	for (i = 0; i < alloc_pages; i++) {
 		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

-		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
 		pg->zone_device_data = page_list;
 		page_list = pg;
 		list_count++;
@@ -96,6 +138,11 @@ static int fill_list(unsigned int nr_pages)
 err_memremap:
 	kfree(pgmap);
 err_pgmap:
+	if (tmp_res) {
+		release_resource(tmp_res);
+		kfree(tmp_res);
+	}
+err_insert:
 	release_resource(res);
 err_resource:
 	kfree(res);
@@ -113,6 +160,14 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 	unsigned int i;
 	int ret = 0;

+	/*
+	 * Fallback to default behavior if we do not have any suitable resource
+	 * to allocate required region from and as the result we won't be able to
+	 * construct pages.
+	 */
+	if (!target_resource)
+		return xen_alloc_ballooned_pages(nr_pages, pages);
+
 	mutex_lock(&list_lock);
 	if (list_count < nr_pages) {
 		ret = fill_list(nr_pages - list_count);
@@ -160,6 +215,11 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 {
 	unsigned int i;

+	if (!target_resource) {
+		xen_free_ballooned_pages(nr_pages, pages);
+		return;
+	}
+
 	mutex_lock(&list_lock);
 	for (i = 0; i < nr_pages; i++) {
 		pages[i]->zone_device_data = page_list;
@@ -202,3 +262,20 @@ static int __init init(void)
 }
 subsys_initcall(init);
 #endif
+
+static int __init unpopulated_init(void)
+{
+	int ret;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	ret = arch_xen_unpopulated_init(&target_resource);
+	if (ret) {
+		pr_err("xen:unpopulated: Cannot initialize target resource\n");
+		target_resource = NULL;
+	}
+
+	return ret;
+}
+early_initcall(unpopulated_init);
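
Callers are unaffected by this plumbing: they keep using the same pair of calls, and the early initcall above decides whether the pages come from extended regions, from iomem_resource, or from the balloon fallback. A hypothetical backend-style consumer (the function name is made up; the API calls are the ones declared in this merge):

/* Sketch: allocate scratch pages, map foreign memory through them, free. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <xen/xen.h>

static int map_foreign_scratch(void)
{
	struct page *pages[8];
	int rc;

	/* Falls back to ballooned pages if no target resource was found. */
	rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
	if (rc)
		return rc;

	/* ... map foreign GFNs or grants into these pages ... */

	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
	return 0;
}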
include/xen/balloon.h
@@ -26,6 +26,9 @@ extern struct balloon_stats balloon_stats;

 void balloon_set_new_target(unsigned long target);

+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
+
 #ifdef CONFIG_XEN_BALLOON
 void xen_balloon_init(void);
 #else
include/xen/interface/xen.h
@@ -722,6 +722,9 @@ struct dom0_vga_console_info {
 			uint32_t gbl_caps;
 			/* Mode attributes (offset 0x0, VESA command 0x4f01). */
 			uint16_t mode_attrs;
+			uint16_t pad;
+			/* high 32 bits of lfb_base */
+			uint32_t ext_lfb_base;
 		} vesa_lfb;
 	} u;
 };
include/xen/xen.h
@@ -52,7 +52,23 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 extern u64 xen_saved_max_mem_size;
 #endif

+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#include <linux/ioport.h>
+int arch_xen_unpopulated_init(struct resource **res);
+#else
+#include <xen/balloon.h>
+static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
+		struct page **pages)
+{
+	return xen_alloc_ballooned_pages(nr_pages, pages);
+}
+static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
+		struct page **pages)
+{
+	xen_free_ballooned_pages(nr_pages, pages);
+}
+#endif

 #endif /* _XEN_XEN_H */