Commit 1d804d07 authored by Joe Perches, committed by Paolo Bonzini

x86: Use bool function return values of true/false not 1/0

Use the normal return values for bool functions
Signed-off-by: Joe Perches <joe@perches.com>
Message-Id: <9f593eb2f43b456851cd73f7ed09654ca58fb570.1427759009.git.joe@perches.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2f729b10
...@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void) ...@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void)
static inline bool kvm_para_available(void) static inline bool kvm_para_available(void)
{ {
return 0; return false;
} }
static inline unsigned int kvm_arch_para_features(void) static inline unsigned int kvm_arch_para_features(void)
......
...@@ -26,7 +26,7 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) ...@@ -26,7 +26,7 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
struct kvm_cpuid_entry2 *best; struct kvm_cpuid_entry2 *best;
if (!static_cpu_has(X86_FEATURE_XSAVE)) if (!static_cpu_has(X86_FEATURE_XSAVE))
return 0; return false;
best = kvm_find_cpuid_entry(vcpu, 1, 0); best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE)); return best && (best->ecx & bit(X86_FEATURE_XSAVE));
......
...@@ -7314,21 +7314,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, ...@@ -7314,21 +7314,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
else if (port < 0x10000) else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b; bitmap = vmcs12->io_bitmap_b;
else else
return 1; return true;
bitmap += (port & 0x7fff) / 8; bitmap += (port & 0x7fff) / 8;
if (last_bitmap != bitmap) if (last_bitmap != bitmap)
if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
return 1; return true;
if (b & (1 << (port & 7))) if (b & (1 << (port & 7)))
return 1; return true;
port++; port++;
size--; size--;
last_bitmap = bitmap; last_bitmap = bitmap;
} }
return 0; return false;
} }
/* /*
...@@ -7344,7 +7344,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, ...@@ -7344,7 +7344,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
gpa_t bitmap; gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return 1; return true;
/* /*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps, * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
...@@ -7363,10 +7363,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, ...@@ -7363,10 +7363,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
if (msr_index < 1024*8) { if (msr_index < 1024*8) {
unsigned char b; unsigned char b;
if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
return 1; return true;
return 1 & (b >> (msr_index & 7)); return 1 & (b >> (msr_index & 7));
} else } else
return 1; /* let L1 handle the wrong parameter */ return true; /* let L1 handle the wrong parameter */
} }
/* /*
...@@ -7388,7 +7388,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, ...@@ -7388,7 +7388,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
case 0: case 0:
if (vmcs12->cr0_guest_host_mask & if (vmcs12->cr0_guest_host_mask &
(val ^ vmcs12->cr0_read_shadow)) (val ^ vmcs12->cr0_read_shadow))
return 1; return true;
break; break;
case 3: case 3:
if ((vmcs12->cr3_target_count >= 1 && if ((vmcs12->cr3_target_count >= 1 &&
...@@ -7399,37 +7399,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, ...@@ -7399,37 +7399,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
vmcs12->cr3_target_value2 == val) || vmcs12->cr3_target_value2 == val) ||
(vmcs12->cr3_target_count >= 4 && (vmcs12->cr3_target_count >= 4 &&
vmcs12->cr3_target_value3 == val)) vmcs12->cr3_target_value3 == val))
return 0; return false;
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
return 1; return true;
break; break;
case 4: case 4:
if (vmcs12->cr4_guest_host_mask & if (vmcs12->cr4_guest_host_mask &
(vmcs12->cr4_read_shadow ^ val)) (vmcs12->cr4_read_shadow ^ val))
return 1; return true;
break; break;
case 8: case 8:
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
return 1; return true;
break; break;
} }
break; break;
case 2: /* clts */ case 2: /* clts */
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
(vmcs12->cr0_read_shadow & X86_CR0_TS)) (vmcs12->cr0_read_shadow & X86_CR0_TS))
return 1; return true;
break; break;
case 1: /* mov from cr */ case 1: /* mov from cr */
switch (cr) { switch (cr) {
case 3: case 3:
if (vmcs12->cpu_based_vm_exec_control & if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR3_STORE_EXITING) CPU_BASED_CR3_STORE_EXITING)
return 1; return true;
break; break;
case 8: case 8:
if (vmcs12->cpu_based_vm_exec_control & if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR8_STORE_EXITING) CPU_BASED_CR8_STORE_EXITING)
return 1; return true;
break; break;
} }
break; break;
...@@ -7440,14 +7440,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, ...@@ -7440,14 +7440,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
*/ */
if (vmcs12->cr0_guest_host_mask & 0xe & if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow)) (val ^ vmcs12->cr0_read_shadow))
return 1; return true;
if ((vmcs12->cr0_guest_host_mask & 0x1) && if ((vmcs12->cr0_guest_host_mask & 0x1) &&
!(vmcs12->cr0_read_shadow & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) &&
(val & 0x1)) (val & 0x1))
return 1; return true;
break; break;
} }
return 0; return false;
} }
/* /*
...@@ -7470,43 +7470,43 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7470,43 +7470,43 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
KVM_ISA_VMX); KVM_ISA_VMX);
if (vmx->nested.nested_run_pending) if (vmx->nested.nested_run_pending)
return 0; return false;
if (unlikely(vmx->fail)) { if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__, pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR)); vmcs_read32(VM_INSTRUCTION_ERROR));
return 1; return true;
} }
switch (exit_reason) { switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI: case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info)) if (!is_exception(intr_info))
return 0; return false;
else if (is_page_fault(intr_info)) else if (is_page_fault(intr_info))
return enable_ept; return enable_ept;
else if (is_no_device(intr_info) && else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS)) !(vmcs12->guest_cr0 & X86_CR0_TS))
return 0; return false;
return vmcs12->exception_bitmap & return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK)); (1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT: case EXIT_REASON_EXTERNAL_INTERRUPT:
return 0; return false;
case EXIT_REASON_TRIPLE_FAULT: case EXIT_REASON_TRIPLE_FAULT:
return 1; return true;
case EXIT_REASON_PENDING_INTERRUPT: case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW: case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH: case EXIT_REASON_TASK_SWITCH:
return 1; return true;
case EXIT_REASON_CPUID: case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
return 0; return false;
return 1; return true;
case EXIT_REASON_HLT: case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD: case EXIT_REASON_INVD:
return 1; return true;
case EXIT_REASON_INVLPG: case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC: case EXIT_REASON_RDPMC:
...@@ -7523,7 +7523,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7523,7 +7523,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* VMX instructions trap unconditionally. This allows L1 to * VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting! * emulate them for its L2 guest, i.e., allows 3-level nesting!
*/ */
return 1; return true;
case EXIT_REASON_CR_ACCESS: case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12); return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS: case EXIT_REASON_DR_ACCESS:
...@@ -7534,7 +7534,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7534,7 +7534,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_MSR_WRITE: case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE: case EXIT_REASON_INVALID_STATE:
return 1; return true;
case EXIT_REASON_MWAIT_INSTRUCTION: case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION: case EXIT_REASON_MONITOR_INSTRUCTION:
...@@ -7544,7 +7544,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7544,7 +7544,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
nested_cpu_has2(vmcs12, nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING); SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY: case EXIT_REASON_MCE_DURING_VMENTRY:
return 0; return false;
case EXIT_REASON_TPR_BELOW_THRESHOLD: case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS: case EXIT_REASON_APIC_ACCESS:
...@@ -7553,7 +7553,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7553,7 +7553,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_APIC_WRITE: case EXIT_REASON_APIC_WRITE:
case EXIT_REASON_EOI_INDUCED: case EXIT_REASON_EOI_INDUCED:
/* apic_write and eoi_induced should exit unconditionally. */ /* apic_write and eoi_induced should exit unconditionally. */
return 1; return true;
case EXIT_REASON_EPT_VIOLATION: case EXIT_REASON_EPT_VIOLATION:
/* /*
* L0 always deals with the EPT violation. If nested EPT is * L0 always deals with the EPT violation. If nested EPT is
...@@ -7561,7 +7561,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7561,7 +7561,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* missing in the guest EPT table (EPT12), the EPT violation * missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault() * will be injected with nested_ept_inject_page_fault()
*/ */
return 0; return false;
case EXIT_REASON_EPT_MISCONFIG: case EXIT_REASON_EPT_MISCONFIG:
/* /*
* L2 never uses directly L1's EPT, but rather L0's own EPT * L2 never uses directly L1's EPT, but rather L0's own EPT
...@@ -7569,11 +7569,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7569,11 +7569,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* (EPT on EPT). So any problems with the structure of the * (EPT on EPT). So any problems with the structure of the
* table is L0's fault. * table is L0's fault.
*/ */
return 0; return false;
case EXIT_REASON_WBINVD: case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV: case EXIT_REASON_XSETBV:
return 1; return true;
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
/* /*
* This should never happen, since it is not possible to * This should never happen, since it is not possible to
...@@ -7583,7 +7583,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -7583,7 +7583,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
*/ */
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default: default:
return 1; return true;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment