Commit 54bf36aa authored by Paolo Bonzini

KVM: x86: use vcpu-specific functions to read/write/translate GFNs

We need to hide SMRAM from guests not running in SMM.  Therefore,
all uses of kvm_read_guest* and kvm_write_guest* must be changed to
check whether the VCPU is in system management mode and use a
different set of memslots.  Switch from kvm_* to the newly-introduced
kvm_vcpu_*, which call into kvm_arch_vcpu_memslots_id.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e4cd1da9
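
To make the memslot-selection idea in the commit message concrete, here is a small standalone C model. It is not kernel code and every name in it is invented for illustration: the point is only that a GFN lookup routed through the VCPU can pick a different memslot set depending on whether the VCPU is in SMM, so SMRAM stays hidden from guests that are not in system management mode.

/*
 * Standalone model of the idea behind this commit -- NOT kernel code.
 * All names are made up for the sketch.
 *
 * Build: cc -Wall smm_memslots_model.c && ./a.out
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct memslot  { gfn_t base_gfn; uint64_t npages; const char *name; };
struct memslots { const struct memslot *slots; int nslots; };

struct vcpu {
	bool in_smm;
	const struct memslots *as[2];	/* 0 = normal view, 1 = SMM view */
};

/* plays the role of kvm_arch_vcpu_memslots_id(): pick the address space */
static int vcpu_memslots_id(const struct vcpu *v)
{
	return v->in_smm ? 1 : 0;
}

/* plays the role of kvm_vcpu_gfn_to_memslot(): resolve through the vcpu */
static const struct memslot *vcpu_gfn_to_memslot(const struct vcpu *v, gfn_t gfn)
{
	const struct memslots *ms = v->as[vcpu_memslots_id(v)];

	for (int i = 0; i < ms->nslots; i++) {
		const struct memslot *s = &ms->slots[i];

		if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
			return s;
	}
	return NULL;	/* no slot: would be treated as MMIO */
}

int main(void)
{
	static const struct memslot normal[] = {
		{ 0x000, 0x0a0, "ram" },
	};
	static const struct memslot smm[] = {
		{ 0x000, 0x0a0, "ram" },
		{ 0x0a0, 0x020, "smram" },	/* only visible in SMM */
	};
	struct memslots ms_normal = { normal, 1 }, ms_smm = { smm, 2 };
	struct vcpu v = { .in_smm = false, .as = { &ms_normal, &ms_smm } };
	const struct memslot *s;

	s = vcpu_gfn_to_memslot(&v, 0xa0);
	printf("non-SMM gfn 0xa0 -> %s\n", s ? s->name : "no slot (MMIO)");

	v.in_smm = true;
	s = vcpu_gfn_to_memslot(&v, 0xa0);
	printf("SMM     gfn 0xa0 -> %s\n", s ? s->name : "no slot (MMIO)");
	return 0;
}

The patch below achieves the same effect by resolving GFNs per VCPU: every kvm_* call on vcpu->kvm is replaced by the corresponding kvm_vcpu_* accessor.
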
@@ -887,7 +887,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   struct kvm_memory_slot *slot,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
...
@@ -223,15 +223,15 @@ static unsigned int get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
-static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
+static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
 {
-	return kvm_memslots(kvm)->generation & MMIO_GEN_MASK;
+	return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
 }
 
-static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 			   unsigned access)
 {
-	unsigned int gen = kvm_current_mmio_generation(kvm);
+	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
@@ -258,22 +258,22 @@ static unsigned get_mmio_spte_access(u64 spte)
 	return (spte & ~mask) & ~PAGE_MASK;
 }
 
-static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			  pfn_t pfn, unsigned access)
 {
 	if (unlikely(is_noslot_pfn(pfn))) {
-		mark_mmio_spte(kvm, sptep, gfn, access);
+		mark_mmio_spte(vcpu, sptep, gfn, access);
 		return true;
 	}
 
 	return false;
 }
 
-static bool check_mmio_spte(struct kvm *kvm, u64 spte)
+static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 {
 	unsigned int kvm_gen, spte_gen;
 
-	kvm_gen = kvm_current_mmio_generation(kvm);
+	kvm_gen = kvm_current_mmio_generation(vcpu);
 	spte_gen = get_mmio_spte_generation(spte);
 
 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
@@ -837,14 +837,14 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm->arch.indirect_shadow_pages--;
 }
 
-static int has_wrprotected_page(struct kvm *kvm,
+static int has_wrprotected_page(struct kvm_vcpu *vcpu,
 				gfn_t gfn,
 				int level)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 
-	slot = gfn_to_memslot(kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	if (slot) {
 		linfo = lpage_info_slot(gfn, slot, level);
 		return linfo->write_count;
@@ -876,7 +876,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
 	struct kvm_memory_slot *slot;
 
-	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
 	      (no_dirty_log && slot->dirty_bitmap))
 		slot = NULL;
@@ -901,7 +901,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
 	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
+		if (has_wrprotected_page(vcpu, large_gfn, level))
 			break;
 
 	return level - 1;
@@ -1336,18 +1336,18 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
+static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	int i;
 	bool write_protected = false;
 
-	slot = gfn_to_memslot(kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmapp = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(kvm, rmapp, true);
+		write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true);
 	}
 
 	return write_protected;
@@ -2032,7 +2032,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 		bool protected = false;
 
 		for_each_sp(pages, sp, parents, i)
-			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
+			protected |= rmap_write_protect(vcpu, sp->gfn);
 
 		if (protected)
 			kvm_flush_remote_tlbs(vcpu->kvm);
@@ -2130,7 +2130,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&sp->hash_link,
 		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
 	if (!direct) {
-		if (rmap_write_protect(vcpu->kvm, gfn))
+		if (rmap_write_protect(vcpu, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			kvm_sync_pages(vcpu, gfn);
@@ -2581,7 +2581,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	u64 spte;
 	int ret = 0;
 
-	if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
+	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
 	spte = PT_PRESENT_MASK;
@@ -2618,7 +2618,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		 * be fixed if guest refault.
 		 */
 		if (level > PT_PAGE_TABLE_LEVEL &&
-		    has_wrprotected_page(vcpu->kvm, gfn, level))
+		    has_wrprotected_page(vcpu, gfn, level))
 			goto done;
 
 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2642,7 +2642,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 
 	if (pte_access & ACC_WRITE_MASK) {
-		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		spte |= shadow_dirty_mask;
 	}
@@ -2860,7 +2860,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 		return 1;
 
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
-		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
+		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
 		return 0;
 	}
@@ -2883,7 +2883,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
-	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
+	    !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
 		 * mmu_notifier_retry was successful and we hold the
@@ -2975,7 +2975,7 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * Compare with set_spte where instead shadow_dirty_mask is set.
 	 */
 	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
-		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 
 	return true;
 }
@@ -3430,7 +3430,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 		gfn_t gfn = get_mmio_spte_gfn(spte);
 		unsigned access = get_mmio_spte_access(spte);
 
-		if (!check_mmio_spte(vcpu->kvm, spte))
+		if (!check_mmio_spte(vcpu, spte))
 			return RET_MMIO_PF_INVALID;
 
 		if (direct)
@@ -3502,7 +3502,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	arch.direct_map = vcpu->arch.mmu.direct_map;
 	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-	return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
+	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
@@ -3520,7 +3520,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	struct kvm_memory_slot *slot;
 	bool async;
 
-	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	async = false;
 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
 	if (!async)
@@ -3633,7 +3633,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
-static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			   unsigned access, int *nr_present)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
@@ -3643,7 +3643,7 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
 		}
 
 		(*nr_present)++;
-		mark_mmio_spte(kvm, sptep, gfn, access);
+		mark_mmio_spte(vcpu, sptep, gfn, access);
 		return true;
 	}
@@ -4153,7 +4153,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
 		*gpa &= ~(gpa_t)7;
 		*bytes = 8;
-		r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
+		r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
 		if (r)
 			gentry = 0;
 		new = (const u8 *)&gentry;
@@ -4779,13 +4779,13 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }
 
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
 {
 	/*
 	 * The very rare case: if the generation-number is round,
 	 * zap all shadow pages.
 	 */
-	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
 		printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
...
@@ -114,7 +114,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 		return;
 
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
-	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);
 
 	if (is_error_pfn(pfn))
 		return;
...
@@ -256,7 +256,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 		if (ret)
 			return ret;
 
-		mark_page_dirty(vcpu->kvm, table_gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
 		walker->ptes[level] = pte;
 	}
 	return 0;
@@ -338,7 +338,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		real_gfn = gpa_to_gfn(real_gfn);
 
-		host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
+		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
 					    &walker->pte_writable[walker->level - 1]);
 		if (unlikely(kvm_is_error_hva(host_addr)))
 			goto error;
@@ -511,11 +511,11 @@ static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 		base_gpa = pte_gpa & ~mask;
 		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
 
-		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
+		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
 				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
 		curr_pte = gw->prefetch_ptes[index];
 	} else
-		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
+		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
 				&curr_pte, sizeof(curr_pte));
 
 	return r || curr_pte != gw->ptes[level - 1];
@@ -869,8 +869,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (!rmap_can_add(vcpu))
 			break;
 
-		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					  sizeof(pt_element_t)))
 			break;
 
 		FNAME(update_pte)(vcpu, sp, sptep, &gpte);
@@ -956,8 +956,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					  sizeof(pt_element_t)))
 			return -EINVAL;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
@@ -970,7 +970,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_access &= FNAME(gpte_access)(vcpu, gpte);
 		FNAME(protect_clean_gpte)(&pte_access, gpte);
 
-		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
+		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
 				   &nr_present))
 			continue;
...
@@ -1955,8 +1955,8 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
 	u64 pdpte;
 	int ret;
 
-	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
+	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
 				  offset_in_page(cr3) + index * 8, 8);
 	if (ret)
 		return 0;
 	return pdpte;
@@ -2114,7 +2114,7 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
 	might_sleep();
 
-	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
+	page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
 	if (is_error_page(page))
 		goto error;
@@ -2153,7 +2153,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 	mask = (0xf >> (4 - size)) << start_bit;
 	val = 0;
 
-	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
 		return NESTED_EXIT_DONE;
 
 	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -2178,7 +2178,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	/* Offset is in 32 bit units but need in 8 bit units */
 	offset *= 4;
 
-	if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
 		return NESTED_EXIT_DONE;
 
 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -2449,7 +2449,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 		p = msrpm_offsets[i];
 		offset = svm->nested.vmcb_msrpm + (p * 4);
 
-		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
 			return false;
 
 		svm->nested.msrpm[p] = svm->msrpm[p] | value;
...
@@ -786,7 +786,7 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
 {
-	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
+	struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);
 	if (is_error_page(page))
 		return NULL;
@@ -7323,7 +7323,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
 		bitmap += (port & 0x7fff) / 8;
 		if (last_bitmap != bitmap)
-			if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
+			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
 				return true;
 		if (b & (1 << (port & 7)))
 			return true;
@@ -7367,7 +7367,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
 	/* Then read the msr_index'th bit from this bitmap: */
 	if (msr_index < 1024*8) {
 		unsigned char b;
-		if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
+		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
 			return true;
 		return 1 & (b >> (msr_index & 7));
 	} else
@@ -7632,9 +7632,9 @@ static void vmx_disable_pml(struct vcpu_vmx *vmx)
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
 }
 
-static void vmx_flush_pml_buffer(struct vcpu_vmx *vmx)
+static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vmx->vcpu.kvm;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 *pml_buf;
 	u16 pml_idx;
@@ -7656,7 +7656,7 @@ static void vmx_flush_pml_buffer(struct vcpu_vmx *vmx)
 		gpa = pml_buf[pml_idx];
 		WARN_ON(gpa & (PAGE_SIZE - 1));
-		mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
+		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
 	}
 
 	/* reset PML index */
@@ -7851,7 +7851,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	 * flushed already.
 	 */
 	if (enable_pml)
-		vmx_flush_pml_buffer(vmx);
+		vmx_flush_pml_buffer(vcpu);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required)
@@ -9109,8 +9109,8 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	msr.host_initiated = false;
 	for (i = 0; i < count; i++) {
-		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
 				   &e, sizeof(e))) {
 			pr_warn_ratelimited(
 				"%s cannot read MSR entry (%u, 0x%08llx)\n",
 				__func__, i, gpa + i * sizeof(e));
@@ -9143,9 +9143,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	for (i = 0; i < count; i++) {
 		struct msr_data msr_info;
-		if (kvm_read_guest(vcpu->kvm,
+		if (kvm_vcpu_read_guest(vcpu,
 				   gpa + i * sizeof(e),
 				   &e, 2 * sizeof(u32))) {
 			pr_warn_ratelimited(
 				"%s cannot read MSR entry (%u, 0x%08llx)\n",
 				__func__, i, gpa + i * sizeof(e));
@@ -9165,10 +9165,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index);
 			return -EINVAL;
 		}
-		if (kvm_write_guest(vcpu->kvm,
+		if (kvm_vcpu_write_guest(vcpu,
 				    gpa + i * sizeof(e) +
 					offsetof(struct vmx_msr_entry, value),
 				    &msr_info.data, sizeof(msr_info.data))) {
 			pr_warn_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
 				__func__, i, e.index, msr_info.data);
...
@@ -478,7 +478,7 @@ EXPORT_SYMBOL_GPL(kvm_require_dr);
 /*
  * This function will be used to read from the physical memory of the currently
- * running guest. The difference to kvm_read_guest_page is that this function
+ * running guest. The difference to kvm_vcpu_read_guest_page is that this function
  * can read from guest physical or from the guest's guest physical memory.
  */
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
@@ -496,7 +496,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	real_gfn = gpa_to_gfn(real_gfn);
 
-	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
+	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
@@ -2030,7 +2030,7 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
 		r = PTR_ERR(page);
 		goto out;
 	}
-	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
+	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
 		goto out_free;
 	r = 0;
 out_free:
@@ -2130,13 +2130,13 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 			break;
 		}
 		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
-		addr = gfn_to_hva(vcpu->kvm, gfn);
+		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
 		if (__clear_user((void __user *)addr, PAGE_SIZE))
 			return 1;
 		vcpu->arch.hv_vapic = data;
-		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
 			return 1;
 		break;
@@ -4425,8 +4425,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
-		ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
+		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
 					  offset, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
 			goto out;
@@ -4459,8 +4459,8 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 	offset = addr & (PAGE_SIZE-1);
 	if (WARN_ON(offset + bytes > PAGE_SIZE))
 		bytes = (unsigned)PAGE_SIZE - offset;
-	ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
+	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
 				  offset, bytes);
 	if (unlikely(ret < 0))
 		return X86EMUL_IO_NEEDED;
@@ -4506,7 +4506,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
-		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
+		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
 			goto out;
@@ -4559,7 +4559,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
 	int ret;
 
-	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
 	if (ret < 0)
 		return 0;
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
@@ -4593,7 +4593,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 			void *val, int bytes)
 {
-	return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
+	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
 }
 
 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -4791,7 +4791,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
 		goto emul_write;
 
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
 	if (is_error_page(page))
 		goto emul_write;
@@ -4819,7 +4819,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	if (!exchanged)
 		return X86EMUL_CMPXCHG_FAILED;
 
-	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
+	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
 	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 
 	return X86EMUL_CONTINUE;
@@ -6570,7 +6570,7 @@ static void process_smi(struct kvm_vcpu *vcpu)
 	else
 		process_smi_save_state_32(vcpu, buf);
 
-	kvm_write_guest(vcpu->kvm, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
+	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
 	if (kvm_x86_ops->get_nmi_mask(vcpu))
 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
@@ -8075,7 +8075,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
-	kvm_mmu_invalidate_mmio_sptes(kvm);
+	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
...