Commit 90bf8d98 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more kvm fixes from Paolo Bonzini:

 - Static analysis fix

 - New SEV-ES protocol for communicating invalid VMGEXIT requests

 - Ensure APICv is considered inactive if there is no APIC

 - Fix reserved bits for AMD PerfEvtSeln register

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Do not terminate SEV-ES guests on GHCB validation failure
  KVM: SEV: Fall back to vmalloc for SEV-ES scratch area if necessary
  KVM: SEV: Return appropriate error codes if SEV-ES scratch setup fails
  KVM: x86/mmu: Retry page fault if root is invalidated by memslot update
  KVM: VMX: Set failure code in prepare_vmcs02()
  KVM: ensure APICv is considered inactive if there is no APIC
  KVM: x86/pmu: Fix reserved bits for AMD PerfEvtSeln register
parents 79a72162 ad5b3532
@@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
 #define APICV_INHIBIT_REASON_PIT_REINJ	4
 #define APICV_INHIBIT_REASON_X2APIC	5
 #define APICV_INHIBIT_REASON_BLOCKIRQ	6
+#define APICV_INHIBIT_REASON_ABSENT	7

 struct kvm_arch {
 	unsigned long n_used_mmu_pages;
...
@@ -73,4 +73,15 @@
 #define GHCB_RESP_CODE(v)		((v) & GHCB_MSR_INFO_MASK)

+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED		1
+#define GHCB_ERR_INVALID_USAGE		2
+#define GHCB_ERR_INVALID_SCRATCH_AREA	3
+#define GHCB_ERR_MISSING_INPUT		4
+#define GHCB_ERR_INVALID_INPUT		5
+#define GHCB_ERR_INVALID_EVENT		6
+
 #endif
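These codes pair with the sev.c changes further down in this diff, where KVM signals a malformed VMGEXIT by writing 2 into the low 32 bits of SW_EXITINFO1 and the specific code into SW_EXITINFO2. A minimal sketch of that host-side convention (the helper name here is hypothetical; the ghcb_set_* accessors are the ones used in the hunks below):

	/* Hypothetical helper illustrating the reporting convention only. */
	static void ghcb_report_invalid_input(struct ghcb *ghcb, u64 reason)
	{
		ghcb_set_sw_exit_info_1(ghcb, 2);	/* "malformed input" marker */
		ghcb_set_sw_exit_info_2(ghcb, reason);	/* one of the GHCB_ERR_* codes */
	}

The guest code that issued the VMGEXIT is expected to check SW_EXITINFO1 and handle the error itself, instead of the hypervisor terminating the VM as before.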
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }

 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	return sp->role.invalid ||
+	if (sp->role.invalid)
+		return true;
+
+	/* TDP MMU pages due not use the MMU generation. */
+	return !sp->tdp_mmu_page &&
 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
@@ -3976,6 +3980,20 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	return true;
 }

+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+				struct kvm_page_fault *fault, int mmu_seq)
+{
+	if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+		return true;
+
+	return fault->slot &&
+	       mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	else
 		write_lock(&vcpu->kvm->mmu_lock);

-	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+	if (is_page_fault_stale(vcpu, fault, mmu_seq))
 		goto out_unlock;
+
 	r = make_mmu_pages_available(vcpu);
 	if (r)
 		goto out_unlock;
...
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	r = RET_PF_RETRY;
 	write_lock(&vcpu->kvm->mmu_lock);

-	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+	if (is_page_fault_stale(vcpu, fault, mmu_seq))
 		goto out_unlock;
+
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
...
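For readers without the surrounding context (elided above), this is roughly the retry pattern the two call sites implement; a simplified sketch, not the verbatim kernel code:

	/*
	 * Snapshot the notifier sequence count before resolving the pfn, then
	 * re-check under mmu_lock that neither a memslot update (which
	 * invalidates roots) nor an mmu_notifier event made the fault stale
	 * before installing the mapping.
	 */
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (kvm_faultin_pfn(vcpu, fault, &r))
		return r;

	write_lock(&vcpu->kvm->mmu_lock);
	if (is_page_fault_stale(vcpu, fault, mmu_seq))
		goto out_unlock;	/* drop the lock and retry the fault */
	/* ... install the mapping ... */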
@@ -900,6 +900,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 bool svm_check_apicv_inhibit_reasons(ulong bit)
 {
 	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+			  BIT(APICV_INHIBIT_REASON_ABSENT) |
			  BIT(APICV_INHIBIT_REASON_HYPERV) |
			  BIT(APICV_INHIBIT_REASON_NESTED) |
			  BIT(APICV_INHIBIT_REASON_IRQWIN) |
...
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
-	pmu->reserved_bits = 0xffffffff00200000ull;
+	pmu->reserved_bits = 0xfffffff000280000ull;
 	pmu->version = 1;
 	/* not applicable to AMD; but clean them to prevent any fall out */
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
...
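The mask change above is easier to follow bit by bit; the annotation below is my reading of the two constants, assuming the AMD PerfEvtSeln layout with EventSelect[11:8] at bits 35:32 (macro names are illustrative, not from the source):

	/* Old mask: bits 63:32 and bit 21 reserved, so the guest could never
	 * program EventSelect[11:8] (bits 35:32), while bit 19 was writable. */
	#define AMD_PERFEVTSEL_RSVD_OLD	0xffffffff00200000ull

	/* New mask: bits 63:36 plus bits 21 and 19 reserved; bits 35:32 are
	 * now accepted, so extended event selects can be used by the guest. */
	#define AMD_PERFEVTSEL_RSVD_NEW	0xfffffff000280000ull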
@@ -2260,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 		__free_page(virt_to_page(svm->sev_es.vmsa));

 	if (svm->sev_es.ghcb_sa_free)
-		kfree(svm->sev_es.ghcb_sa);
+		kvfree(svm->sev_es.ghcb_sa);
 }

 static void dump_ghcb(struct vcpu_svm *svm)
@@ -2352,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }

-static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu;
 	struct ghcb *ghcb;
-	u64 exit_code = 0;
+	u64 exit_code;
+	u64 reason;

 	ghcb = svm->sev_es.ghcb;

-	/* Only GHCB Usage code 0 is supported */
-	if (ghcb->ghcb_usage)
-		goto vmgexit_err;
-
 	/*
 	 * Retrieve the exit code now even though is may not be marked valid
 	 * as it could help with debugging.
 	 */
 	exit_code = ghcb_get_sw_exit_code(ghcb);

+	/* Only GHCB Usage code 0 is supported */
+	if (ghcb->ghcb_usage) {
+		reason = GHCB_ERR_INVALID_USAGE;
+		goto vmgexit_err;
+	}
+
+	reason = GHCB_ERR_MISSING_INPUT;
+
 	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
 	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
 	    !ghcb_sw_exit_info_2_is_valid(ghcb))
@@ -2448,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 		break;
 	default:
+		reason = GHCB_ERR_INVALID_EVENT;
 		goto vmgexit_err;
 	}

-	return 0;
+	return true;

 vmgexit_err:
 	vcpu = &svm->vcpu;

-	if (ghcb->ghcb_usage) {
+	if (reason == GHCB_ERR_INVALID_USAGE) {
 		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
 			    ghcb->ghcb_usage);
+	} else if (reason == GHCB_ERR_INVALID_EVENT) {
+		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+			    exit_code);
 	} else {
-		vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
 			    exit_code);
 		dump_ghcb(svm);
 	}

-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
-	vcpu->run->internal.ndata = 2;
-	vcpu->run->internal.data[0] = exit_code;
-	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+	/* Clear the valid entries fields */
+	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+
+	ghcb_set_sw_exit_info_1(ghcb, 2);
+	ghcb_set_sw_exit_info_2(ghcb, reason);

-	return -EINVAL;
+	return false;
 }

 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
@@ -2493,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 		svm->sev_es.ghcb_sa_sync = false;
 	}

-	kfree(svm->sev_es.ghcb_sa);
+	kvfree(svm->sev_es.ghcb_sa);
 	svm->sev_es.ghcb_sa = NULL;
 	svm->sev_es.ghcb_sa_free = false;
 }
@@ -2541,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
 	if (!scratch_gpa_beg) {
 		pr_err("vmgexit: scratch gpa not provided\n");
-		return false;
+		goto e_scratch;
 	}

 	scratch_gpa_end = scratch_gpa_beg + len;
 	if (scratch_gpa_end < scratch_gpa_beg) {
 		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
 		       len, scratch_gpa_beg);
-		return false;
+		goto e_scratch;
 	}

 	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2566,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		    scratch_gpa_end > ghcb_scratch_end) {
 			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
 			       scratch_gpa_beg, scratch_gpa_end);
-			return false;
+			goto e_scratch;
 		}

 		scratch_va = (void *)svm->sev_es.ghcb;
@@ -2579,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		if (len > GHCB_SCRATCH_AREA_LIMIT) {
 			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
 			       len, GHCB_SCRATCH_AREA_LIMIT);
-			return false;
+			goto e_scratch;
 		}
-		scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
+		scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
 		if (!scratch_va)
-			return false;
+			goto e_scratch;

 		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
 			/* Unable to copy scratch area from guest */
 			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
-			kfree(scratch_va);
-			return false;
+			kvfree(scratch_va);
+			goto e_scratch;
 		}

 		/*
@@ -2607,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	svm->sev_es.ghcb_sa_len = len;

 	return true;
+
+e_scratch:
+	ghcb_set_sw_exit_info_1(ghcb, 2);
+	ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+
+	return false;
 }

 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
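The kzalloc() -> kvzalloc() switch above is the "fall back to vmalloc" fix from the merge summary. The general pattern, with an illustrative buffer name:

	/*
	 * kvzalloc() attempts a physically contiguous kmalloc() first and
	 * falls back to vmalloc() when the request is large or memory is
	 * fragmented; the buffer must therefore be freed with kvfree().
	 */
	buf = kvzalloc(len, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kvfree(buf);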
@@ -2657,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)

 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
 		if (!ret) {
-			ret = -EINVAL;
+			/* Error, keep GHCB MSR value as-is */
 			break;
 		}

@@ -2693,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 				  GHCB_MSR_TERM_REASON_POS);
 		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
 			reason_set, reason_code);
-		fallthrough;
+
+		ret = -EINVAL;
+		break;
 	}
 	default:
-		ret = -EINVAL;
+		/* Error, keep GHCB MSR value as-is */
 		break;
 	}

 	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@@ -2720,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)

 	if (!ghcb_gpa) {
 		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
-		return -EINVAL;
+
+		/* Without a GHCB, just return right back to the guest */
+		return 1;
 	}

 	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
 		/* Unable to map GHCB from guest */
 		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
 			    ghcb_gpa);
-		return -EINVAL;
+
+		/* Without a GHCB, just return right back to the guest */
+		return 1;
 	}

 	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@@ -2737,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)

 	exit_code = ghcb_get_sw_exit_code(ghcb);

-	ret = sev_es_validate_vmgexit(svm);
-	if (ret)
-		return ret;
+	if (!sev_es_validate_vmgexit(svm))
+		return 1;

 	sev_es_sync_from_ghcb(svm);
 	ghcb_set_sw_exit_info_1(ghcb, 0);
 	ghcb_set_sw_exit_info_2(ghcb, 0);

-	ret = -EINVAL;
+	ret = 1;
 	switch (exit_code) {
 	case SVM_VMGEXIT_MMIO_READ:
 		if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@@ -2786,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		default:
 			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
 			       control->exit_info_1);
-			ghcb_set_sw_exit_info_1(ghcb, 1);
-			ghcb_set_sw_exit_info_2(ghcb,
-						X86_TRAP_UD |
-						SVM_EVTINJ_TYPE_EXEPT |
-						SVM_EVTINJ_VALID);
+			ghcb_set_sw_exit_info_1(ghcb, 2);
+			ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
 		}

-		ret = 1;
 		break;
 	}
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 		vcpu_unimpl(vcpu,
 			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
 			    control->exit_info_1, control->exit_info_2);
-		ret = -EINVAL;
 		break;
 	default:
 		ret = svm_invoke_exit_handler(vcpu, exit_code);
@@ -2821,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 		return -EINVAL;

 	if (!setup_vmgexit_scratch(svm, in, bytes))
-		return -EINVAL;
+		return 1;

 	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
 				    count, in);
...
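The scattered -EINVAL -> 1 conversions above lean on KVM's standard exit-handler return convention, summarized here for reference (not something introduced by this merge):

	/*
	 * KVM x86 VM-exit handlers return:
	 *    1 - resume the guest
	 *    0 - exit to userspace (vcpu->run describes the reason)
	 *  < 0 - error propagated to the KVM_RUN ioctl
	 *
	 * Returning 1 after a failed VMGEXIT lets the guest observe the
	 * GHCB_ERR_* code instead of being killed with KVM_EXIT_INTERNAL_ERROR.
	 */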
@@ -2591,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,

 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
 	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-				     vmcs12->guest_ia32_perf_global_ctrl)))
+				     vmcs12->guest_ia32_perf_global_ctrl))) {
+		*entry_failure_code = ENTRY_FAIL_DEFAULT;
 		return -EINVAL;
+	}

 	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
 	kvm_rip_write(vcpu, vmcs12->guest_rip);
...
@@ -7525,6 +7525,7 @@ static void hardware_unsetup(void)
 static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 {
 	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+			  BIT(APICV_INHIBIT_REASON_ABSENT) |
			  BIT(APICV_INHIBIT_REASON_HYPERV) |
			  BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
...
@@ -5740,6 +5740,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 		smp_wmb();
 		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
 		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+		kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
 		r = 0;
 split_irqchip_unlock:
 		mutex_unlock(&kvm->lock);
@@ -6120,6 +6121,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
 		smp_wmb();
 		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+		kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
 create_irqchip_unlock:
 		mutex_unlock(&kvm->lock);
 		break;
@@ -8818,10 +8820,9 @@ static void kvm_apicv_init(struct kvm *kvm)
 {
 	init_rwsem(&kvm->arch.apicv_update_lock);

-	if (enable_apicv)
-		clear_bit(APICV_INHIBIT_REASON_DISABLE,
-			  &kvm->arch.apicv_inhibit_reasons);
-	else
+	set_bit(APICV_INHIBIT_REASON_ABSENT,
+		&kvm->arch.apicv_inhibit_reasons);
+
+	if (!enable_apicv)
 		set_bit(APICV_INHIBIT_REASON_DISABLE,
 			&kvm->arch.apicv_inhibit_reasons);
 }
...
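Taken together, the x86.c hunks above give the new inhibit bit a simple lifecycle; sketched below, assuming kvm_request_apicv_update() with a true "activate" argument clears the given inhibit reason:

	/* VM creation: no local APIC exists yet, start with APICv inhibited. */
	set_bit(APICV_INHIBIT_REASON_ABSENT, &kvm->arch.apicv_inhibit_reasons);

	/* KVM_CREATE_IRQCHIP / KVM_CAP_SPLIT_IRQCHIP: an APIC now exists, so
	 * the ABSENT inhibit is lifted (other inhibit reasons may remain). */
	kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);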