Commit d5106539 authored by Andrew Jones, committed by Paolo Bonzini

kvm: selftests: add vm_phy_pages_alloc

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent eabe7881
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -110,6 +110,8 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 		 uint32_t pgd_memslot);
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			      vm_paddr_t paddr_min, uint32_t memslot);
 
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
 				 void *guest_code);
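For orientation (not part of this commit): a test that needs physically contiguous guest memory would call the new helper alongside the old one roughly as below. The wrapper function and all constants here are made-up examples.

#include "kvm_util.h"

/*
 * Hypothetical usage sketch, not from this patch: allocate four
 * contiguous guest-physical pages plus one standalone page, both
 * at or above 1 MiB in memslot 0. The values are arbitrary.
 */
static void example_allocs(struct kvm_vm *vm)
{
	vm_paddr_t range = vm_phy_pages_alloc(vm, 4, 0x100000, 0);
	vm_paddr_t page = vm_phy_page_alloc(vm, 0x100000, 0);

	(void)range;
	(void)page;
}

Existing callers of vm_phy_page_alloc are unaffected: as the lib/kvm_util.c hunks below show, it becomes a num == 1 wrapper around the new allocator.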
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1378,10 +1378,11 @@ const char *exit_reason_str(unsigned int exit_reason)
 }
 
 /*
- * Physical Page Allocate
+ * Physical Contiguous Page Allocator
  *
  * Input Args:
  *   vm - Virtual Machine
+ *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
  *
@@ -1390,16 +1391,18 @@ const char *exit_reason_str(unsigned int exit_reason)
  * Return:
  *   Starting physical address
  *
- * Within the VM specified by vm, locates an available physical page
- * at or above paddr_min. If found, the page is marked as in use
- * and its address is returned. A TEST_ASSERT failure occurs if no
- * page is available at or above paddr_min.
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-			     uint32_t memslot)
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			      vm_paddr_t paddr_min, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
-	sparsebit_idx_t pg;
+	sparsebit_idx_t pg, base;
+
+	TEST_ASSERT(num > 0, "Must allocate at least one page");
 
 	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
 		"not divisible by page size.\n"
@@ -1407,25 +1410,36 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 		paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	pg = paddr_min >> vm->page_shift;
+	base = pg = paddr_min >> vm->page_shift;
 
-	/* Locate next available physical page at or above paddr_min. */
-	if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-		pg = sparsebit_next_set(region->unused_phy_pages, pg);
-		if (pg == 0) {
-			fprintf(stderr, "No guest physical page available, "
-				"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-				paddr_min, vm->page_size, memslot);
-			fputs("---- vm dump ----\n", stderr);
-			vm_dump(stderr, vm, 2);
-			abort();
+	do {
+		for (; pg < base + num; ++pg) {
+			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+				break;
+			}
 		}
+	} while (pg && pg != base + num);
+
+	if (pg == 0) {
+		fprintf(stderr, "No guest physical page available, "
+			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+			paddr_min, vm->page_size, memslot);
+		fputs("---- vm dump ----\n", stderr);
+		vm_dump(stderr, vm, 2);
+		abort();
 	}
 
-	/* Specify page as in use and return its address. */
-	sparsebit_clear(region->unused_phy_pages, pg);
+	for (pg = base; pg < base + num; ++pg)
+		sparsebit_clear(region->unused_phy_pages, pg);
+
+	return base * vm->page_size;
+}
 
-	return pg * vm->page_size;
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+			     uint32_t memslot)
+{
+	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
 }
 
 /*
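The allocation loop is a first-fit scan over the region's unused_phy_pages sparsebit: walk forward trying to collect num consecutive set bits starting at base; on the first clear bit, jump to the next set bit (sparsebit_next_set returns 0 once the bitmap is exhausted) and restart the candidate run there; the loop terminates when a full run is assembled (pg == base + num) or no pages remain (pg == 0). Below is a minimal standalone sketch of the same scan, assuming a plain bool array in place of the sparsebit; it is an illustration of the algorithm, not the selftest code.

#include <stdbool.h>
#include <stddef.h>

/*
 * First-fit scan over a bool bitmap ("unused[i]" true == page i free).
 * Returns the first index of a run of num free pages at or above min,
 * or (size_t)-1 if no such run exists.
 */
static size_t find_free_run(const bool *unused, size_t npages,
			    size_t min, size_t num)
{
	size_t base = min, pg = min;

	if (num == 0 || min >= npages)
		return (size_t)-1;

	while (pg < npages && pg != base + num) {
		if (unused[pg]) {
			pg++;	/* page extends the current run */
			continue;
		}
		/* Hole: restart the candidate run at the next free page. */
		while (pg < npages && !unused[pg])
			pg++;
		base = pg;
	}

	return pg == base + num ? base : (size_t)-1;
}

For example, with unused = {true, true, false, true, true, true}, find_free_run(unused, 6, 0, 3) skips past the hole at index 2 and returns 3, mirroring how the patch restarts its candidate run at the next set bit after a hole.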