Commit 9715092f authored by Babu Moger, committed by Paolo Bonzini

KVM: X86: Move handling of INVPCID types to x86

INVPCID instruction handling is mostly the same across both VMX and
SVM. So, move the code to the common x86.c.
Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Message-Id: <159985255212.11252.10322694343971983487.stgit@bmoger-ubuntu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3f3393b3
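
The point of the move is that a second caller can reuse the logic. Below is a minimal sketch of how an SVM intercept handler could delegate to the new helper; it is not part of this patch (SVM enablement lands separately), and the handler name plus the use of EXITINFO1/EXITINFO2 for the descriptor address and type are assumptions based on the SVM architecture:

/*
 * Hypothetical SVM-side caller (not part of this patch). For an INVPCID
 * intercept, EXITINFO1 is assumed to hold the linear address of the
 * INVPCID descriptor and EXITINFO2 the INVPCID type.
 */
static int invpcid_interception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long type;
        gva_t gva;

        if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        type = svm->vmcb->control.exit_info_2;
        gva = svm->vmcb->control.exit_info_1;

        return kvm_handle_invpcid(vcpu, type, gva);
}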
arch/x86/kvm/vmx/vmx.c
@@ -5497,16 +5497,11 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 {
 	u32 vmx_instruction_info;
 	unsigned long type;
-	bool pcid_enabled;
 	gva_t gva;
-	struct x86_exception e;
-	unsigned i;
-	unsigned long roots_to_free = 0;
 	struct {
 		u64 pcid;
 		u64 gla;
 	} operand;
-	int r;

 	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5529,68 +5524,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 				sizeof(operand), &gva))
 		return 1;
-	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
-	if (r != X86EMUL_CONTINUE)
-		return kvm_handle_memory_failure(vcpu, r, &e);
-
-	if (operand.pcid >> 12 != 0) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
-
-	pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
-	switch (type) {
-	case INVPCID_TYPE_INDIV_ADDR:
-		if ((!pcid_enabled && (operand.pcid != 0)) ||
-		    is_noncanonical_address(operand.gla, vcpu)) {
-			kvm_inject_gp(vcpu, 0);
-			return 1;
-		}
-
-		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
-		return kvm_skip_emulated_instruction(vcpu);
-
-	case INVPCID_TYPE_SINGLE_CTXT:
-		if (!pcid_enabled && (operand.pcid != 0)) {
-			kvm_inject_gp(vcpu, 0);
-			return 1;
-		}
-
-		if (kvm_get_active_pcid(vcpu) == operand.pcid) {
-			kvm_mmu_sync_roots(vcpu);
-			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-		}
-
-		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
-			    == operand.pcid)
-				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
-
-		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
-		/*
-		 * If neither the current cr3 nor any of the prev_roots use the
-		 * given PCID, then nothing needs to be done here because a
-		 * resync will happen anyway before switching to any other CR3.
-		 */
-
-		return kvm_skip_emulated_instruction(vcpu);
-
-	case INVPCID_TYPE_ALL_NON_GLOBAL:
-		/*
-		 * Currently, KVM doesn't mark global entries in the shadow
-		 * page tables, so a non-global flush just degenerates to a
-		 * global flush. If needed, we could optimize this later by
-		 * keeping track of global entries in shadow page tables.
-		 */
-
-		fallthrough;
-	case INVPCID_TYPE_ALL_INCL_GLOBAL:
-		kvm_mmu_unload(vcpu);
-		return kvm_skip_emulated_instruction(vcpu);
-
-	default:
-		BUG(); /* We have already checked above that type <= 3 */
-	}
+	return kvm_handle_invpcid(vcpu, type, gva);
 }
 static int handle_pml_full(struct kvm_vcpu *vcpu)
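
For orientation, the part of handle_invpcid() that survives on the VMX side (elided above as unchanged context) only decodes the operands before delegating. A rough, abridged sketch of that interior, paraphrased rather than quoted, with helper names as best recalled from the surrounding kernel code:

        /*
         * The INVPCID type is carried in the register selected by bits
         * 31:28 of VMX_INSTRUCTION_INFO; types above 3 take a #GP.
         */
        vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

        if (type > 3) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        /* Decode the linear address of the 16-byte INVPCID descriptor. */
        if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
                                vmx_instruction_info, false,
                                sizeof(operand), &gva))
                return 1;

        return kvm_handle_invpcid(vcpu, type, gva);

This is also why the new helper can BUG() in its default case: every caller is expected to have rejected type > 3 before calling in.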
arch/x86/kvm/x86.c
@@ -71,6 +71,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/mshyperv.h>
 #include <asm/hypervisor.h>
+#include <asm/tlbflush.h>
 #include <asm/intel_pt.h>
 #include <asm/emulate_prefix.h>
 #include <clocksource/hyperv_timer.h>
@@ -10793,6 +10794,83 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
+{
+	bool pcid_enabled;
+	struct x86_exception e;
+	unsigned i;
+	unsigned long roots_to_free = 0;
+	struct {
+		u64 pcid;
+		u64 gla;
+	} operand;
+	int r;
+
+	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+	if (r != X86EMUL_CONTINUE)
+		return kvm_handle_memory_failure(vcpu, r, &e);
+
+	if (operand.pcid >> 12 != 0) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
+
+	pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+	switch (type) {
+	case INVPCID_TYPE_INDIV_ADDR:
+		if ((!pcid_enabled && (operand.pcid != 0)) ||
+		    is_noncanonical_address(operand.gla, vcpu)) {
+			kvm_inject_gp(vcpu, 0);
+			return 1;
+		}
+
+		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
+		return kvm_skip_emulated_instruction(vcpu);
+
+	case INVPCID_TYPE_SINGLE_CTXT:
+		if (!pcid_enabled && (operand.pcid != 0)) {
+			kvm_inject_gp(vcpu, 0);
+			return 1;
+		}
+
+		if (kvm_get_active_pcid(vcpu) == operand.pcid) {
+			kvm_mmu_sync_roots(vcpu);
+			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+		}
+
+		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
+			    == operand.pcid)
+				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
+		/*
+		 * If neither the current cr3 nor any of the prev_roots use the
+		 * given PCID, then nothing needs to be done here because a
+		 * resync will happen anyway before switching to any other CR3.
+		 */
+
+		return kvm_skip_emulated_instruction(vcpu);
+
+	case INVPCID_TYPE_ALL_NON_GLOBAL:
+		/*
+		 * Currently, KVM doesn't mark global entries in the shadow
+		 * page tables, so a non-global flush just degenerates to a
+		 * global flush. If needed, we could optimize this later by
+		 * keeping track of global entries in shadow page tables.
+		 */
+
+		fallthrough;
+	case INVPCID_TYPE_ALL_INCL_GLOBAL:
+		kvm_mmu_unload(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+
+	default:
+		BUG(); /* We have already checked above that type <= 3 */
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
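
A note on the new #include <asm/tlbflush.h> above: the INVPCID_TYPE_* constants that kvm_handle_invpcid() switches on are defined in that header rather than in a KVM-private one. For reference, their values follow the Intel SDM encoding of the INVPCID type operand:

#define INVPCID_TYPE_INDIV_ADDR		0	/* one gla + PCID pair */
#define INVPCID_TYPE_SINGLE_CTXT	1	/* all entries for one PCID */
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2	/* everything, incl. globals */
#define INVPCID_TYPE_ALL_NON_GLOBAL	3	/* everything except globals */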
arch/x86/kvm/x86.h
@@ -373,6 +373,7 @@ int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 			      struct x86_exception *e);
+int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);

 #define KVM_MSR_RET_INVALID 2
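Finally, the anonymous operand struct read by kvm_handle_invpcid() mirrors the 16-byte INVPCID descriptor in guest memory, which also explains the operand.pcid >> 12 check: per the SDM, bits 63:12 of the first quadword are reserved and must be zero, otherwise the guest takes a #GP. A sketch of the layout (the struct name here is illustrative, not from the patch):

struct invpcid_descriptor {	/* illustrative name only */
	u64 pcid;	/* bits 11:0 = PCID; bits 63:12 reserved, #GP if set */
	u64 gla;	/* linear address, consumed only by type 0 */
};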