Commit f742d94f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Rename vm_vcpu_add* helpers to better show relationships

Rename vm_vcpu_add() to __vm_vcpu_add(), and vm_vcpu_add_default() to
vm_vcpu_add() to show the relationship between the newly minted
vm_vcpu_add() and __vm_vcpu_add().
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1422efd6
...@@ -418,7 +418,7 @@ static void run_test(struct vcpu_config *c) ...@@ -418,7 +418,7 @@ static void run_test(struct vcpu_config *c)
vm = vm_create_barebones(); vm = vm_create_barebones();
prepare_vcpu_init(c, &init); prepare_vcpu_init(c, &init);
vm_vcpu_add(vm, 0); __vm_vcpu_add(vm, 0);
aarch64_vcpu_setup(vm, 0, &init); aarch64_vcpu_setup(vm, 0, &init);
finalize_vcpu(vm, 0, c); finalize_vcpu(vm, 0, c);
......
...@@ -84,8 +84,8 @@ static struct kvm_vm *setup_vm(void *guest_code) ...@@ -84,8 +84,8 @@ static struct kvm_vm *setup_vm(void *guest_code)
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2); init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_code); aarch64_vcpu_add(vm, VCPU_ID_SOURCE, &init, guest_code);
aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_code); aarch64_vcpu_add(vm, VCPU_ID_TARGET, &init, guest_code);
return vm; return vm;
} }
......
...@@ -26,12 +26,12 @@ static int add_init_2vcpus(struct kvm_vcpu_init *init1, ...@@ -26,12 +26,12 @@ static int add_init_2vcpus(struct kvm_vcpu_init *init1,
vm = vm_create_barebones(); vm = vm_create_barebones();
vm_vcpu_add(vm, 0); __vm_vcpu_add(vm, 0);
ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1); ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
if (ret) if (ret)
goto free_exit; goto free_exit;
vm_vcpu_add(vm, 1); __vm_vcpu_add(vm, 1);
ret = __vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2); ret = __vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
free_exit: free_exit:
...@@ -51,8 +51,8 @@ static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1, ...@@ -51,8 +51,8 @@ static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
vm = vm_create_barebones(); vm = vm_create_barebones();
vm_vcpu_add(vm, 0); __vm_vcpu_add(vm, 0);
vm_vcpu_add(vm, 1); __vm_vcpu_add(vm, 1);
ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1); ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
if (ret) if (ret)
......
...@@ -331,7 +331,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type) ...@@ -331,7 +331,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type)
/* Add the rest of the VCPUs */ /* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i) for (i = 1; i < NR_VCPUS; ++i)
vm_vcpu_add_default(v.vm, i, guest_code); vm_vcpu_add(v.vm, i, guest_code);
ret = run_vcpu(v.vm, 3); ret = run_vcpu(v.vm, 3);
TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run"); TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
...@@ -418,17 +418,17 @@ static void test_v3_typer_accesses(void) ...@@ -418,17 +418,17 @@ static void test_v3_typer_accesses(void)
v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3); v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
vm_vcpu_add_default(v.vm, 3, guest_code); vm_vcpu_add(v.vm, 3, guest_code);
v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL, v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL,
"attempting to read GICR_TYPER of non created vcpu"); "attempting to read GICR_TYPER of non created vcpu");
vm_vcpu_add_default(v.vm, 1, guest_code); vm_vcpu_add(v.vm, 1, guest_code);
v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY, v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY,
"read GICR_TYPER before GIC initialized"); "read GICR_TYPER before GIC initialized");
vm_vcpu_add_default(v.vm, 2, guest_code); vm_vcpu_add(v.vm, 2, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
...@@ -559,7 +559,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void) ...@@ -559,7 +559,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
/* Add the rest of the VCPUs */ /* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i) for (i = 1; i < NR_VCPUS; ++i)
vm_vcpu_add_default(v.vm, i, guest_code); vm_vcpu_add(v.vm, i, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
......
...@@ -676,7 +676,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid, ...@@ -676,7 +676,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages); vm = __vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages);
log_mode_create_vm_done(vm); log_mode_create_vm_done(vm);
vm_vcpu_add_default(vm, vcpuid, guest_code); vm_vcpu_add(vm, vcpuid, guest_code);
return vm; return vm;
} }
......
...@@ -108,7 +108,7 @@ static void run_test(uint32_t run) ...@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
pr_debug("%s: [%d] start vcpus\n", __func__, run); pr_debug("%s: [%d] start vcpus\n", __func__, run);
for (i = 0; i < VCPU_NUM; ++i) { for (i = 0; i < VCPU_NUM; ++i) {
vm_vcpu_add_default(vm, i, guest_code); vm_vcpu_add(vm, i, guest_code);
payloads[i].vm = vm; payloads[i].vm = vm;
payloads[i].index = i; payloads[i].index = i;
......
...@@ -64,9 +64,8 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint ...@@ -64,9 +64,8 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint
} }
void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init); void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu_init *init, struct kvm_vcpu_init *init, void *guest_code);
void *guest_code);
struct ex_regs { struct ex_regs {
u64 regs[31]; u64 regs[31];
......
...@@ -288,7 +288,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, ...@@ -288,7 +288,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid); struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
...@@ -659,9 +659,8 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, ...@@ -659,9 +659,8 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code); void *guest_code);
static inline struct kvm_vcpu *vm_vcpu_add_default(struct kvm_vm *vm, static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
uint32_t vcpu_id, void *guest_code)
void *guest_code)
{ {
return vm_arch_vcpu_add(vm, vcpu_id, guest_code); return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
} }
......
...@@ -223,7 +223,7 @@ int main(int argc, char *argv[]) ...@@ -223,7 +223,7 @@ int main(int argc, char *argv[])
for (i = 0; i < max_vm; ++i) { for (i = 0; i < max_vm; ++i) {
vms[i] = vm_create_barebones(); vms[i] = vm_create_barebones();
for (j = 0; j < max_vcpu; ++j) for (j = 0; j < max_vcpu; ++j)
vm_vcpu_add(vms[i], j); __vm_vcpu_add(vms[i], j);
} }
/* Check stats read for every VM and VCPU */ /* Check stats read for every VM and VCPU */
......
...@@ -32,7 +32,7 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus) ...@@ -32,7 +32,7 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++) for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
/* This asserts that the vCPU was created. */ /* This asserts that the vCPU was created. */
vm_vcpu_add(vm, i); __vm_vcpu_add(vm, i);
kvm_vm_free(vm); kvm_vm_free(vm);
} }
......
...@@ -314,16 +314,15 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t in ...@@ -314,16 +314,15 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t in
indent, "", pstate, pc); indent, "", pstate, pc);
} }
struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu_init *init, struct kvm_vcpu_init *init, void *guest_code)
void *guest_code)
{ {
size_t stack_size = vm->page_size == 4096 ? size_t stack_size = vm->page_size == 4096 ?
DEFAULT_STACK_PGS * vm->page_size : DEFAULT_STACK_PGS * vm->page_size :
vm->page_size; vm->page_size;
uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size, uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
DEFAULT_ARM64_GUEST_STACK_VADDR_MIN); DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpu_id); struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
aarch64_vcpu_setup(vm, vcpu_id, init); aarch64_vcpu_setup(vm, vcpu_id, init);
...@@ -336,7 +335,7 @@ struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id, ...@@ -336,7 +335,7 @@ struct kvm_vcpu *aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code) void *guest_code)
{ {
return aarch64_vcpu_add_default(vm, vcpu_id, NULL, guest_code); return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
} }
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
......
...@@ -328,7 +328,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, ...@@ -328,7 +328,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
for (i = 0; i < nr_vcpus; ++i) { for (i = 0; i < nr_vcpus; ++i) {
uint32_t vcpuid = vcpuids ? vcpuids[i] : i; uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
vm_vcpu_add_default(vm, vcpuid, guest_code); vm_vcpu_add(vm, vcpuid, guest_code);
} }
return vm; return vm;
...@@ -397,7 +397,7 @@ struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) ...@@ -397,7 +397,7 @@ struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{ {
kvm_vm_restart(vm); kvm_vm_restart(vm);
return vm_vcpu_add(vm, 0); return __vm_vcpu_add(vm, 0);
} }
/* /*
...@@ -1065,7 +1065,7 @@ static int vcpu_mmap_sz(void) ...@@ -1065,7 +1065,7 @@ static int vcpu_mmap_sz(void)
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU. * No additional vCPU setup is done. Returns the vCPU.
*/ */
struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
......
...@@ -287,7 +287,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, ...@@ -287,7 +287,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_mp_state mps; struct kvm_mp_state mps;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
vcpu = vm_vcpu_add(vm, vcpu_id); vcpu = __vm_vcpu_add(vm, vcpu_id);
riscv_vcpu_mmu_setup(vm, vcpu_id); riscv_vcpu_mmu_setup(vm, vcpu_id);
/* /*
......
...@@ -170,7 +170,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, ...@@ -170,7 +170,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
stack_vaddr = vm_vaddr_alloc(vm, stack_size, stack_vaddr = vm_vaddr_alloc(vm, stack_size,
DEFAULT_GUEST_STACK_VADDR_MIN); DEFAULT_GUEST_STACK_VADDR_MIN);
vcpu = vm_vcpu_add(vm, vcpu_id); vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */ /* Setup guest registers */
vcpu_regs_get(vm, vcpu_id, &regs); vcpu_regs_get(vm, vcpu_id, &regs);
......
...@@ -643,7 +643,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, ...@@ -643,7 +643,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
DEFAULT_GUEST_STACK_VADDR_MIN); DEFAULT_GUEST_STACK_VADDR_MIN);
vcpu = vm_vcpu_add(vm, vcpu_id); vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid()); vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
vcpu_setup(vm, vcpu_id); vcpu_setup(vm, vcpu_id);
......
...@@ -315,7 +315,7 @@ static void test_zero_memory_regions(void) ...@@ -315,7 +315,7 @@ static void test_zero_memory_regions(void)
pr_info("Testing KVM_RUN with zero added memory regions\n"); pr_info("Testing KVM_RUN with zero added memory regions\n");
vm = vm_create_barebones(); vm = vm_create_barebones();
vcpu = vm_vcpu_add(vm, 0); vcpu = __vm_vcpu_add(vm, 0);
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul); vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
vcpu_run(vm, vcpu->id); vcpu_run(vm, vcpu->id);
......
...@@ -275,7 +275,7 @@ int main(int ac, char **av) ...@@ -275,7 +275,7 @@ int main(int ac, char **av)
/* Add the rest of the VCPUs */ /* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i) for (i = 1; i < NR_VCPUS; ++i)
vm_vcpu_add_default(vm, i, guest_code); vm_vcpu_add(vm, i, guest_code);
steal_time_init(vm); steal_time_init(vm);
......
...@@ -369,7 +369,7 @@ static void test_pmu_config_disable(void (*guest_code)(void)) ...@@ -369,7 +369,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE); vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
vcpu = vm_vcpu_add_default(vm, 0, guest_code); vcpu = vm_vcpu_add(vm, 0, guest_code);
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, vcpu->id); vcpu_init_descriptor_tables(vm, vcpu->id);
......
...@@ -92,9 +92,9 @@ static struct kvm_vm *create_vm(void) ...@@ -92,9 +92,9 @@ static struct kvm_vm *create_vm(void)
static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code) static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
{ {
if (bsp_code) if (bsp_code)
vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu); vm_vcpu_add(vm, vcpuid, guest_bsp_vcpu);
else else
vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu); vm_vcpu_add(vm, vcpuid, guest_not_bsp_vcpu);
} }
static void run_vm_bsp(uint32_t bsp_vcpu) static void run_vm_bsp(uint32_t bsp_vcpu)
......
...@@ -95,7 +95,7 @@ int main(int argc, char *argv[]) ...@@ -95,7 +95,7 @@ int main(int argc, char *argv[])
* the vCPU model, i.e. without doing KVM_SET_CPUID2. * the vCPU model, i.e. without doing KVM_SET_CPUID2.
*/ */
vm = vm_create_barebones(); vm = vm_create_barebones();
vcpu = vm_vcpu_add(vm, 0); vcpu = __vm_vcpu_add(vm, 0);
vcpu_sregs_get(vm, vcpu->id, &sregs); vcpu_sregs_get(vm, vcpu->id, &sregs);
......
...@@ -56,7 +56,7 @@ static struct kvm_vm *sev_vm_create(bool es) ...@@ -56,7 +56,7 @@ static struct kvm_vm *sev_vm_create(bool es)
vm = vm_create_barebones(); vm = vm_create_barebones();
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL); sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i); __vm_vcpu_add(vm, i);
if (es) if (es)
start.policy |= SEV_POLICY_ES; start.policy |= SEV_POLICY_ES;
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start); sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
...@@ -75,7 +75,7 @@ static struct kvm_vm *aux_vm_create(bool with_vcpus) ...@@ -75,7 +75,7 @@ static struct kvm_vm *aux_vm_create(bool with_vcpus)
return vm; return vm;
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i); __vm_vcpu_add(vm, i);
return vm; return vm;
} }
...@@ -182,7 +182,7 @@ static void test_sev_migrate_parameters(void) ...@@ -182,7 +182,7 @@ static void test_sev_migrate_parameters(void)
sev_es_vm = sev_vm_create(/* es= */ true); sev_es_vm = sev_vm_create(/* es= */ true);
sev_es_vm_no_vmsa = vm_create_barebones(); sev_es_vm_no_vmsa = vm_create_barebones();
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL); sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
vm_vcpu_add(sev_es_vm_no_vmsa, 1); __vm_vcpu_add(sev_es_vm_no_vmsa, 1);
ret = __sev_migrate_from(sev_vm, sev_es_vm); ret = __sev_migrate_from(sev_vm, sev_es_vm);
TEST_ASSERT( TEST_ASSERT(
...@@ -278,7 +278,7 @@ static void test_sev_mirror(bool es) ...@@ -278,7 +278,7 @@ static void test_sev_mirror(bool es)
/* Check that we can complete creation of the mirror VM. */ /* Check that we can complete creation of the mirror VM. */
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(dst_vm, i); __vm_vcpu_add(dst_vm, i);
if (es) if (es)
sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
......
...@@ -51,10 +51,10 @@ static void *run_vcpu(void *_cpu_nr) ...@@ -51,10 +51,10 @@ static void *run_vcpu(void *_cpu_nr)
static bool first_cpu_done; static bool first_cpu_done;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
/* The kernel is fine, but vm_vcpu_add_default() needs locking */ /* The kernel is fine, but vm_vcpu_add() needs locking */
pthread_spin_lock(&create_lock); pthread_spin_lock(&create_lock);
vcpu = vm_vcpu_add_default(vm, vcpu_id, guest_code); vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);
if (!first_cpu_done) { if (!first_cpu_done) {
first_cpu_done = true; first_cpu_done = true;
......
...@@ -425,7 +425,7 @@ int main(int argc, char *argv[]) ...@@ -425,7 +425,7 @@ int main(int argc, char *argv[])
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code); vm_vcpu_add(vm, SENDER_VCPU_ID, sender_guest_code);
test_data_page_vaddr = vm_vaddr_alloc_page(vm); test_data_page_vaddr = vm_vaddr_alloc_page(vm);
data = data =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment