Commit 4e15a0dd authored by Paolo Bonzini

KVM: SEV: snapshot the GHCB before accessing it

Validation of the GHCB is susceptible to time-of-check/time-of-use vulnerabilities.
To avoid them, we would like to always snapshot the fields that are read in
sev_es_validate_vmgexit(), and never use the GHCB again after it returns.

This means:

- invoking sev_es_sync_from_ghcb() before any GHCB access, including before
  sev_es_validate_vmgexit()

- snapshotting all fields including the valid bitmap and the sw_scratch field,
  which are currently not cached anywhere.

The valid bitmap is the first thing to be copied out of the GHCB; then,
further accesses will use the copy in svm->sev_es.
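
To illustrate the pattern, here is a minimal user-space sketch (not the kernel
code; the struct and function names below are invented for the example): both
validation and later use read one snapshot taken up front, instead of
re-reading the guest-writable page and racing with the guest.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in for the guest-writable shared page (invented for this example). */
struct toy_ghcb {
	uint8_t  valid_bitmap[16];
	uint64_t sw_scratch;
};

/* Host-private snapshot: copied once, before any validation. */
struct toy_snapshot {
	uint8_t  valid_bitmap[16];
	uint64_t sw_scratch;
};

static void toy_snapshot_ghcb(const struct toy_ghcb *shared, struct toy_snapshot *snap)
{
	/* Copy the valid bitmap first, then the fields the host will use. */
	memcpy(snap->valid_bitmap, shared->valid_bitmap, sizeof(snap->valid_bitmap));
	snap->sw_scratch = shared->sw_scratch;
}

static bool toy_field_is_valid(const struct toy_snapshot *snap, unsigned int bit)
{
	/* Validation reads only the snapshot ... */
	return snap->valid_bitmap[bit / 8] & (1u << (bit % 8));
}

static uint64_t toy_get_scratch(const struct toy_snapshot *snap)
{
	/* ... and so does the later use, so the checked and used values match. */
	return snap->sw_scratch;
}

The patch below applies the same idea: sev_es_sync_from_ghcb() copies the valid
bitmap and sw_scratch into svm->sev_es, and sev_es_validate_vmgexit() checks
those copies through the new kvm_ghcb_*_is_valid() helpers.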

Fixes: 291bd20d ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5a759117
@@ -2417,15 +2417,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	 */
 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
 
-	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
-	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
+	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
+	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
 
-	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
+	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
+	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
 
-	if (ghcb_xcr0_is_valid(ghcb)) {
+	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
+
+	if (kvm_ghcb_xcr0_is_valid(svm)) {
 		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
 		kvm_update_cpuid_runtime(vcpu);
 	}
@@ -2436,6 +2439,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	control->exit_code_hi = upper_32_bits(exit_code);
 	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
 	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
+	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
 
 	/* Clear the valid entries fields */
 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
@@ -2464,56 +2468,56 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	reason = GHCB_ERR_MISSING_INPUT;
 
-	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
-	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
-	    !ghcb_sw_exit_info_2_is_valid(ghcb))
+	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
+	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
+	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
 		goto vmgexit_err;
 
 	switch (ghcb_get_sw_exit_code(ghcb)) {
 	case SVM_EXIT_READ_DR7:
 		break;
 	case SVM_EXIT_WRITE_DR7:
-		if (!ghcb_rax_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_RDTSC:
 		break;
 	case SVM_EXIT_RDPMC:
-		if (!ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_CPUID:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		if (ghcb_get_rax(ghcb) == 0xd)
-			if (!ghcb_xcr0_is_valid(ghcb))
+			if (!kvm_ghcb_xcr0_is_valid(svm))
 				goto vmgexit_err;
 		break;
 	case SVM_EXIT_INVD:
 		break;
 	case SVM_EXIT_IOIO:
 		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
-			if (!ghcb_sw_scratch_is_valid(ghcb))
+			if (!kvm_ghcb_sw_scratch_is_valid(svm))
 				goto vmgexit_err;
 		} else {
 			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
-				if (!ghcb_rax_is_valid(ghcb))
+				if (!kvm_ghcb_rax_is_valid(svm))
 					goto vmgexit_err;
 		}
 		break;
 	case SVM_EXIT_MSR:
-		if (!ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		if (ghcb_get_sw_exit_info_1(ghcb)) {
-			if (!ghcb_rax_is_valid(ghcb) ||
-			    !ghcb_rdx_is_valid(ghcb))
+			if (!kvm_ghcb_rax_is_valid(svm) ||
+			    !kvm_ghcb_rdx_is_valid(svm))
 				goto vmgexit_err;
 		}
 		break;
 	case SVM_EXIT_VMMCALL:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_cpl_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_cpl_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_RDTSCP:
@@ -2521,19 +2525,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	case SVM_EXIT_WBINVD:
 		break;
 	case SVM_EXIT_MONITOR:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb) ||
-		    !ghcb_rdx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm) ||
+		    !kvm_ghcb_rdx_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_EXIT_MWAIT:
-		if (!ghcb_rax_is_valid(ghcb) ||
-		    !ghcb_rcx_is_valid(ghcb))
+		if (!kvm_ghcb_rax_is_valid(svm) ||
+		    !kvm_ghcb_rcx_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_VMGEXIT_MMIO_READ:
 	case SVM_VMGEXIT_MMIO_WRITE:
-		if (!ghcb_sw_scratch_is_valid(ghcb))
+		if (!kvm_ghcb_sw_scratch_is_valid(svm))
 			goto vmgexit_err;
 		break;
 	case SVM_VMGEXIT_NMI_COMPLETE:
@@ -2563,9 +2567,6 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 		dump_ghcb(svm);
 	}
 
-	/* Clear the valid entries fields */
-	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
-
 	ghcb_set_sw_exit_info_1(ghcb, 2);
 	ghcb_set_sw_exit_info_2(ghcb, reason);
@@ -2586,7 +2587,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 	 */
 	if (svm->sev_es.ghcb_sa_sync) {
 		kvm_write_guest(svm->vcpu.kvm,
-				ghcb_get_sw_scratch(svm->sev_es.ghcb),
+				svm->sev_es.sw_scratch,
 				svm->sev_es.ghcb_sa,
 				svm->sev_es.ghcb_sa_len);
 		svm->sev_es.ghcb_sa_sync = false;
@@ -2637,7 +2638,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	u64 scratch_gpa_beg, scratch_gpa_end;
 	void *scratch_va;
 
-	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
+	scratch_gpa_beg = svm->sev_es.sw_scratch;
 	if (!scratch_gpa_beg) {
 		pr_err("vmgexit: scratch gpa not provided\n");
 		goto e_scratch;
@@ -2853,11 +2854,11 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
 	exit_code = ghcb_get_sw_exit_code(ghcb);
 
+	sev_es_sync_from_ghcb(svm);
+
 	ret = sev_es_validate_vmgexit(svm);
 	if (ret)
 		return ret;
 
-	sev_es_sync_from_ghcb(svm);
-
 	ghcb_set_sw_exit_info_1(ghcb, 0);
 	ghcb_set_sw_exit_info_2(ghcb, 0);
...
@@ -190,10 +190,12 @@ struct vcpu_sev_es_state {
 	/* SEV-ES support */
 	struct sev_es_save_area *vmsa;
 	struct ghcb *ghcb;
+	u8 valid_bitmap[16];
 	struct kvm_host_map ghcb_map;
 	bool received_first_sipi;
 
 	/* SEV-ES scratch area support */
+	u64 sw_scratch;
 	void *ghcb_sa;
 	u32 ghcb_sa_len;
 	bool ghcb_sa_sync;
@@ -744,4 +746,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 
+#define DEFINE_KVM_GHCB_ACCESSORS(field) \
+	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
+	{ \
+		return test_bit(GHCB_BITMAP_IDX(field), \
+				(unsigned long *)&svm->sev_es.valid_bitmap); \
+	} \
+	\
+	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
+	{ \
+		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
+	} \
+
+DEFINE_KVM_GHCB_ACCESSORS(cpl)
+DEFINE_KVM_GHCB_ACCESSORS(rax)
+DEFINE_KVM_GHCB_ACCESSORS(rcx)
+DEFINE_KVM_GHCB_ACCESSORS(rdx)
+DEFINE_KVM_GHCB_ACCESSORS(rbx)
+DEFINE_KVM_GHCB_ACCESSORS(rsi)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
+DEFINE_KVM_GHCB_ACCESSORS(xcr0)
+
 #endif
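
For reference, DEFINE_KVM_GHCB_ACCESSORS(rax) above expands to roughly the
following pair of inline helpers: the _is_valid() check tests the snapshotted
bitmap in svm->sev_es, and the _if_valid() getter returns the GHCB value only
when that bit was set.

static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
{
	return test_bit(GHCB_BITMAP_IDX(rax),
			(unsigned long *)&svm->sev_es.valid_bitmap);
}

static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb)
{
	return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
}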