Commit 96c852c8 authored by Paolo Bonzini's avatar Paolo Bonzini

kvm: selftests: Do not indent with spaces

Some indentation with spaces crept in, likely due to terminal-based
cut and paste.  Clean it up.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fa681181
...@@ -368,14 +368,14 @@ bool is_amd_cpu(void); ...@@ -368,14 +368,14 @@ bool is_amd_cpu(void);
/*
 * Decode the CPU family from CPUID leaf 01H EAX.
 *
 * The base family lives in bits 11:8.  When it reads 0xf, the
 * extended family field (bits 27:20) must be added in, per the
 * x86 CPUID encoding rules.
 */
static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int family = (eax >> 8) & 0xf;

	/* Family 0xf is the escape value: fold in the extended family. */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;

	return family;
}
static inline unsigned int x86_model(unsigned int eax) static inline unsigned int x86_model(unsigned int eax)
......
...@@ -499,9 +499,11 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) ...@@ -499,9 +499,11 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
uint64_t first_page, uint32_t num_pages) uint64_t first_page, uint32_t num_pages)
{ {
struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot, struct kvm_clear_dirty_log args = {
.first_page = first_page, .dirty_bitmap = log, .slot = slot,
.num_pages = num_pages }; .first_page = first_page,
.num_pages = num_pages
};
int ret; int ret;
ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
......
...@@ -1144,25 +1144,25 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) ...@@ -1144,25 +1144,25 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
list->nmsrs = nmsrs; list->nmsrs = nmsrs;
r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list); r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
r); r);
state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0])); state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events); r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state); r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs); r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
r); r);
r = vcpu_save_xsave_state(vm, vcpu, state); r = vcpu_save_xsave_state(vm, vcpu, state);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
r); r);
if (kvm_check_cap(KVM_CAP_XCRS)) { if (kvm_check_cap(KVM_CAP_XCRS)) {
r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
...@@ -1171,17 +1171,17 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) ...@@ -1171,17 +1171,17 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
} }
r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
r); r);
if (nested_size) { if (nested_size) {
state->nested.size = sizeof(state->nested_); state->nested.size = sizeof(state->nested_);
r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested); r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
r); r);
TEST_ASSERT(state->nested.size <= nested_size, TEST_ASSERT(state->nested.size <= nested_size,
"Nested state size too big, %i (KVM_CHECK_CAP gave %i)", "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
state->nested.size, nested_size); state->nested.size, nested_size);
} else } else
state->nested.size = 0; state->nested.size = 0;
...@@ -1189,12 +1189,12 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) ...@@ -1189,12 +1189,12 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
for (i = 0; i < nmsrs; i++) for (i = 0; i < nmsrs; i++)
state->msrs.entries[i].index = list->indices[i]; state->msrs.entries[i].index = list->indices[i];
r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs); r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)", TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
r, r == nmsrs ? -1 : list->indices[r]); r, r == nmsrs ? -1 : list->indices[r]);
r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs); r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
r); r);
free(list); free(list);
return state; return state;
...@@ -1207,7 +1207,7 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s ...@@ -1207,7 +1207,7 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs); r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
TEST_ASSERT(r == state->msrs.nmsrs, TEST_ASSERT(r == state->msrs.nmsrs,
...@@ -1222,28 +1222,28 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s ...@@ -1222,28 +1222,28 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave); r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events); r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state); r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs); r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
r); r);
r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs); r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
r); r);
if (state->nested.size) { if (state->nested.size) {
r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested); r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i", TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
r); r);
} }
} }
......
...@@ -77,8 +77,8 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage) ...@@ -77,8 +77,8 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
switch (get_ucall(vm, vcpuid, &uc)) { switch (get_ucall(vm, vcpuid, &uc)) {
case UCALL_SYNC: case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx", uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
stage + 1, (ulong)uc.args[1]); stage + 1, (ulong)uc.args[1]);
return; return;
case UCALL_DONE: case UCALL_DONE:
return; return;
......
...@@ -30,8 +30,8 @@ static struct kvm_vm *vm; ...@@ -30,8 +30,8 @@ static struct kvm_vm *vm;
static void l2_guest_code(void) static void l2_guest_code(void)
{ {
/* Exit to L0 */ /* Exit to L0 */
asm volatile("inb %%dx, %%al" asm volatile("inb %%dx, %%al"
: : [port] "d" (PORT_L0_EXIT) : "rax"); : : [port] "d" (PORT_L0_EXIT) : "rax");
} }
static void l1_guest_code(struct vmx_pages *vmx_pages) static void l1_guest_code(struct vmx_pages *vmx_pages)
......
...@@ -46,20 +46,20 @@ static struct kvm_vm *vm; ...@@ -46,20 +46,20 @@ static struct kvm_vm *vm;
#define MIN_STEAL_TIME 50000 #define MIN_STEAL_TIME 50000
/*
 * Per-vCPU pvclock time info shared between host and guest.
 * Layout is ABI-fixed (KVM pvclock protocol) — field order, widths
 * and the packed attribute must not change.
 */
struct pvclock_vcpu_time_info {
	u32 version;		/* bumped (odd while updating) by the host */
	u32 pad0;
	u64 tsc_timestamp;
	u64 system_time;
	u32 tsc_to_system_mul;
	s8 tsc_shift;
	u8 flags;
	u8 pad[2];
} __attribute__((__packed__)); /* 32 bytes */
/*
 * Boot wall-clock time published by the host via pvclock.
 * ABI-fixed packed layout; do not reorder fields.
 */
struct pvclock_wall_clock {
	u32 version;	/* odd while the host is updating sec/nsec */
	u32 sec;
	u32 nsec;
} __attribute__((__packed__));
struct vcpu_runstate_info { struct vcpu_runstate_info {
...@@ -74,11 +74,11 @@ struct arch_vcpu_info { ...@@ -74,11 +74,11 @@ struct arch_vcpu_info {
}; };
/*
 * Xen-style per-vCPU shared info block.  Mirrors the Xen public ABI
 * layout (64 bytes on x86); field order and types are fixed.
 */
struct vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	unsigned long evtchn_pending_sel;
	struct arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
struct shared_info { struct shared_info {
...@@ -493,7 +493,7 @@ int main(int argc, char *argv[]) ...@@ -493,7 +493,7 @@ int main(int argc, char *argv[])
vm_ts.tv_sec = wc->sec; vm_ts.tv_sec = wc->sec;
vm_ts.tv_nsec = wc->nsec; vm_ts.tv_nsec = wc->nsec;
TEST_ASSERT(wc->version && !(wc->version & 1), TEST_ASSERT(wc->version && !(wc->version & 1),
"Bad wallclock version %x", wc->version); "Bad wallclock version %x", wc->version);
TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old"); TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old");
TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new"); TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment