Commit 19efffa2 authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MTRR: sort variable MTRRs

Sort all valid variable MTRRs by their base address; this will help us
check whether a range is fully contained in the variable MTRRs.
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
[Fix list insertion sort, simplify var_mtrr_range_is_valid to just
 test the V bit. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a13842dc
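
Editor's note: the insertion loop added in mtrr.c below keeps the list of valid variable MTRRs ordered by base address. As a hedged illustration (standalone demo code, not part of the commit; plain prev/next pointers stand in for the kernel's struct list_head and list_add_tail() semantics):

/* demo.c - standalone sketch of set_var_mtrr_msr()'s sorted insert */
#include <stdio.h>
#include <stdint.h>

struct range {
	uint64_t base;
	struct range *prev, *next;	/* stand-in for struct list_head node */
};

static struct range head = { 0, &head, &head };	/* empty circular list */

static void insert_sorted(struct range *cur)
{
	struct range *tmp;

	/* mirrors: list_for_each_entry(tmp, &head, node) ... break; */
	for (tmp = head.next; tmp != &head; tmp = tmp->next)
		if (cur->base >= tmp->base)
			break;

	/* mirrors list_add_tail(&cur->node, &tmp->node): link cur in
	 * immediately before tmp (before the sentinel if no match) */
	cur->prev = tmp->prev;
	cur->next = tmp;
	tmp->prev->next = cur;
	tmp->prev = cur;
}

int main(void)
{
	struct range r[] = { { .base = 5 }, { .base = 10 }, { .base = 7 } };

	for (int i = 0; i < 3; i++)
		insert_sorted(&r[i]);

	for (struct range *it = head.next; it != &head; it = it->next)
		printf("%llu\n", (unsigned long long)it->base);
	/* prints 10, 7, 5: the >= test plus insert-before-tmp keeps the
	 * list ordered by base, largest first */
	return 0;
}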
arch/x86/include/asm/kvm_host.h
@@ -345,12 +345,15 @@ enum {
 struct kvm_mtrr_range {
 	u64 base;
 	u64 mask;
+	struct list_head node;
 };
 
 struct kvm_mtrr {
 	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
 	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
 	u64 deftype;
+
+	struct list_head head;
 };
 
 struct kvm_vcpu_arch {
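
Editor's note: the struct changes above are the usual intrusive-list pattern: each kvm_mtrr_range carries its own node, the per-vCPU kvm_mtrr carries the list head, and no allocation is needed because the ranges already live in var_ranges[]. A sketch of how list_for_each_entry() gets from a node back to its containing range (demo types; the kernel's container_of() additionally type-checks):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

struct demo_range {			/* stand-in for struct kvm_mtrr_range */
	unsigned long long base;
	unsigned long long mask;
	struct list_head node;
};

/* simplified container_of(): subtract the member offset from the node */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct demo_range r = { .base = 0x80000000ULL };
	struct list_head *pos = &r.node;

	struct demo_range *back = container_of(pos, struct demo_range, node);
	printf("recovered base = %#llx\n", back->base);	/* 0x80000000 */
	return 0;
}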
arch/x86/kvm/mtrr.c
@@ -285,6 +285,39 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
 	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
 }
 
+static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
+{
+	return (range->mask & (1 << 11)) != 0;
+}
+
+static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
+	struct kvm_mtrr_range *tmp, *cur;
+	int index, is_mtrr_mask;
+
+	index = (msr - 0x200) / 2;
+	is_mtrr_mask = msr - 0x200 - 2 * index;
+	cur = &mtrr_state->var_ranges[index];
+
+	/* remove the entry if it's in the list. */
+	if (var_mtrr_range_is_valid(cur))
+		list_del(&mtrr_state->var_ranges[index].node);
+
+	if (!is_mtrr_mask)
+		cur->base = data;
+	else
+		cur->mask = data;
+
+	/* add it to the list if it's enabled. */
+	if (var_mtrr_range_is_valid(cur)) {
+		list_for_each_entry(tmp, &mtrr_state->head, node)
+			if (cur->base >= tmp->base)
+				break;
+		list_add_tail(&cur->node, &tmp->node);
+	}
+}
+
 int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	int index;
@@ -299,16 +332,8 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		vcpu->arch.mtrr_state.deftype = data;
 	else if (msr == MSR_IA32_CR_PAT)
 		vcpu->arch.pat = data;
-	else {	/* Variable MTRRs */
-		int is_mtrr_mask;
-
-		index = (msr - 0x200) / 2;
-		is_mtrr_mask = msr - 0x200 - 2 * index;
-		if (!is_mtrr_mask)
-			vcpu->arch.mtrr_state.var_ranges[index].base = data;
-		else
-			vcpu->arch.mtrr_state.var_ranges[index].mask = data;
-	}
+	else
+		set_var_mtrr_msr(vcpu, msr, data);
 
 	update_mtrr(vcpu, msr);
 	return 0;
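
Editor's note: set_var_mtrr_msr() relies on the variable-MTRR MSR layout: MSRs 0x200, 0x202, ... are MTRRphysBase0..n, MSRs 0x201, 0x203, ... are MTRRphysMask0..n, and bit 11 of each PhysMask MSR is the V (valid) bit tested by var_mtrr_range_is_valid(). A small standalone check of the decode arithmetic (demo code, not from the commit):

#include <stdio.h>

int main(void)
{
	/* MSRs 0x200..0x20f cover 8 base/mask pairs */
	for (unsigned msr = 0x200; msr <= 0x20f; msr++) {
		int index = (msr - 0x200) / 2;              /* which pair */
		int is_mtrr_mask = msr - 0x200 - 2 * index; /* 0 = base, 1 = mask */

		printf("msr %#x -> var_ranges[%d].%s\n",
		       msr, index, is_mtrr_mask ? "mask" : "base");
	}

	/* bit 11 of MTRRphysMask is the V (valid) bit; only ranges with
	 * it set are linked into the sorted list */
	unsigned long long mask = (1ULL << 11) | 0xffff00000ULL;
	printf("valid = %d\n", (mask & (1 << 11)) != 0);
	return 0;
}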
@@ -354,6 +379,11 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
+void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
+{
+	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
+}
+
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
arch/x86/kvm/x86.c
@@ -7379,13 +7379,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int r;
 
+	kvm_vcpu_mtrr_init(vcpu);
 	r = vcpu_load(vcpu);
 	if (r)
 		return r;
 	kvm_vcpu_reset(vcpu, false);
 	kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
 	return r;
 }
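
Editor's note: kvm_vcpu_mtrr_init() runs during vCPU creation, before any guest MSR write can reach set_var_mtrr_msr(); list_del()/list_add_tail() on an uninitialized head would follow garbage pointers. All INIT_LIST_HEAD() amounts to is making the empty list circular, as in this sketch (demo code, not kernel source):

#include <assert.h>

struct list_head { struct list_head *prev, *next; };

/* the sentinel points at itself, so iterating an empty list visits nothing */
static void init_list_head(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

int main(void)
{
	struct list_head head;

	init_list_head(&head);
	assert(head.next == &head && head.prev == &head);	/* list_empty() */
	return 0;
}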
arch/x86/kvm/x86.h
@@ -162,6 +162,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 				gva_t addr, void *val, unsigned int bytes,
 				struct x86_exception *exception);
 
+void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);