Commit ba4e6279 authored by Paolo Bonzini

Merge tag 'kvm-ppc-next-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

PPC KVM update for 5.8

- Updates and bug fixes for secure guest support.
- Other minor bug fixes and cleanups.
parents 3741679b 11362b1b
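Most of the churn in this merge comes from dropping the separate struct kvm_run * parameter that used to be threaded through the PPC KVM entry/exit paths: handlers now take only the vcpu and reach the shared run area through vcpu->run. A minimal before/after sketch of that pattern (hypothetical handler names, not functions from this tree):

	/* Before: kvm_run had to be passed alongside the vcpu. */
	static int old_style_handler(struct kvm_run *run, struct kvm_vcpu *vcpu)
	{
		run->exit_reason = KVM_EXIT_DEBUG;
		return RESUME_HOST;
	}

	/* After: vcpu->run is the one way to reach the run area. */
	static int new_style_handler(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;

		run->exit_reason = KVM_EXIT_DEBUG;
		return RESUME_HOST;
	}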
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
-			struct kvm_vcpu *vcpu, unsigned long addr,
-			unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+			unsigned long addr, unsigned long status);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 			unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 			unsigned long gpa, gva_t ea, int is_store);
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
-			struct kvm_vcpu *vcpu,
-			unsigned long ea, unsigned long dsisr);
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
+			unsigned long ea, unsigned long dsisr);
 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 			gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
 			bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
			  struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
...
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
	struct mmio_hpte_cache_entry *pgfault_cache;
	struct task_struct *run_task;
-	struct kvm_run *kvm_run;
	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
...
@@ -58,28 +58,28 @@ enum xlate_readwrite {
	XLATE_WRITE /* check for write permissions */
 };
-extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
-extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
-extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
			unsigned int rs, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
-extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes,
			int is_default_endian);
@@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
-extern int kvmppc_emulate_instruction(struct kvm_run *run,
-				      struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
-extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
 extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -267,7 +266,7 @@ struct kvmppc_ops {
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
-	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
@@ -291,7 +290,7 @@ struct kvmppc_ops {
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
-	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
...
@@ -755,9 +755,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr);
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
 }
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
...
@@ -18,7 +18,7 @@ extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
 extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance);
 extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
					int sprn, ulong spr_val);
...
@@ -413,7 +413,7 @@ static int instruction_is_store(unsigned int instr)
	return (instr & mask) != 0;
 }
-int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
 {
	u32 last_inst;
@@ -473,10 +473,10 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
-	return kvmppc_emulate_mmio(run, vcpu);
+	return kvmppc_emulate_mmio(vcpu);
 }
-int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
 {
	struct kvm *kvm = vcpu->kvm;
@@ -499,7 +499,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	pte_t pte, *ptep;
	if (kvm_is_radix(kvm))
-		return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
+		return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
	/*
	 * Real-mode code has already searched the HPT and found the
@@ -519,7 +519,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
			gfn_base = gpa_base >> PAGE_SHIFT;
			gpa = gpa_base | (ea & (psize - 1));
-			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+			return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
						      dsisr & DSISR_ISSTORE);
		}
	}
@@ -555,7 +555,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
-		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	/*
...
@@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache;
 static pte_t *kvmppc_pte_alloc(void)
 {
-	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+	pte_t *pte;
+
+	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+	/* pmd_populate() will only reference _pa(pte). */
+	kmemleak_ignore(pte);
+
+	return pte;
 }
 static void kvmppc_pte_free(pte_t *ptep)
@@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep)
 static pmd_t *kvmppc_pmd_alloc(void)
 {
-	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+	pmd_t *pmd;
+
+	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+	/* pud_populate() will only reference _pa(pmd). */
+	kmemleak_ignore(pmd);
+
+	return pmd;
 }
 static void kvmppc_pmd_free(pmd_t *pmdp)
@@ -417,9 +429,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
  * Callers are responsible for flushing the PWC.
  *
  * When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
  */
 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
@@ -433,7 +449,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
-			WARN_ON_ONCE(1);
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
@@ -887,7 +902,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
	return ret;
 }
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
 {
	struct kvm *kvm = vcpu->kvm;
@@ -933,7 +948,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
-		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
@@ -1115,6 +1130,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
				 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
+	/*
+	 * Increase the mmu notifier sequence number to prevent any page
+	 * fault that read the memslot earlier from writing a PTE.
+	 */
+	kvm->mmu_notifier_seq++;
	spin_unlock(&kvm->mmu_lock);
 }
...
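The mmu_notifier_seq bump added above pairs with the check page-fault handlers make before installing a PTE. Roughly, and only as an illustrative sketch with the fault-specific lookups elided, the consumer side looks like this; a fault that sampled the memslot before the flush is forced to bail out and retry rather than write a stale mapping:

	static int install_pte_sketch(struct kvm *kvm, unsigned long gpa)
	{
		unsigned long mmu_seq = kvm->mmu_notifier_seq;

		smp_rmb();
		/* ... resolve gpa -> pfn using the memslot visible here ... */

		spin_lock(&kvm->mmu_lock);
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* Memslot/notifier activity raced with us: retry. */
			spin_unlock(&kvm->mmu_lock);
			return -EAGAIN;
		}
		/* ... safe to create the PTE for gpa here ... */
		spin_unlock(&kvm->mmu_lock);
		return 0;
	}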
@@ -73,6 +73,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;
+	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		table_group = iommu_group_get_iommudata(grp);
@@ -87,7 +88,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
+		cond_resched_rcu();
	}
+	rcu_read_unlock();
 }
 extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
@@ -105,12 +108,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
	if (!f.file)
		return -EBADF;
+	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
+	rcu_read_unlock();
	fdput(f);
@@ -143,6 +148,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
	if (!tbl)
		return -EINVAL;
+	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;
@@ -150,14 +156,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
+			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
+		rcu_read_unlock();
		return 0;
	}
+	rcu_read_unlock();
	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
@@ -365,18 +374,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;
+	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;
		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
-		if (!mem)
-			return H_TOO_HARD;
-		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
+			rcu_read_unlock();
			return H_TOO_HARD;
+		}
	}
+	rcu_read_unlock();
	return H_SUCCESS;
 }
...
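The rcu_read_lock()/rcu_read_unlock() additions above follow the standard rule that list_for_each_entry_rcu() walks must run inside an RCU read-side critical section (lockdep/PROVE_RCU complains otherwise), and every early return out of the walk has to drop the read lock first; cond_resched_rcu(), added in one loop, briefly drops and re-takes it to give the scheduler a chance during long walks. A generic sketch of the basic pattern, with made-up structure names:

	struct demo_entry {
		struct list_head next;
		int payload;
	};

	static int demo_sum(struct list_head *head)
	{
		struct demo_entry *e;
		int sum = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(e, head, next)
			sum += e->payload;	/* entries stay valid while the read lock is held */
		rcu_read_unlock();

		return sum;
	}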
@@ -235,7 +235,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
 #endif
-int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
 {
	int emulated = EMULATE_DONE;
@@ -371,13 +371,13 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
		if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
			break;
-		run->papr_hcall.nr = cmd;
+		vcpu->run->papr_hcall.nr = cmd;
		for (i = 0; i < 9; ++i) {
			ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
-			run->papr_hcall.args[i] = gpr;
+			vcpu->run->papr_hcall.args[i] = gpr;
		}
-		run->exit_reason = KVM_EXIT_PAPR_HCALL;
+		vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		emulated = EMULATE_EXIT_USER;
		break;
@@ -629,7 +629,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
	}
	if (emulated == EMULATE_FAIL)
-		emulated = kvmppc_emulate_paired_single(run, vcpu);
+		emulated = kvmppc_emulate_paired_single(vcpu);
	return emulated;
 }
...
@@ -1097,9 +1097,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
		ret = kvmppc_h_svm_init_done(vcpu->kvm);
		break;
	case H_SVM_INIT_ABORT:
-		ret = H_UNSUPPORTED;
-		if (kvmppc_get_srr1(vcpu) & MSR_S)
-			ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+		/*
+		 * Even if that call is made by the Ultravisor, the SSR1 value
+		 * is the guest context one, with the secure bit clear as it has
+		 * not yet been secured. So we can't check it here.
+		 * Instead the kvm->arch.secure_guest flag is checked inside
+		 * kvmppc_h_svm_init_abort().
+		 */
+		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
		break;
	default:
@@ -1154,8 +1159,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
	return kvmppc_hcall_impl_hv_realmode(cmd);
 }
-static int kvmppc_emulate_debug_inst(struct kvm_run *run,
-				     struct kvm_vcpu *vcpu)
+static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 {
	u32 last_inst;
@@ -1169,8 +1173,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run,
	}
	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
-		run->exit_reason = KVM_EXIT_DEBUG;
-		run->debug.arch.address = kvmppc_get_pc(vcpu);
+		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -1271,9 +1275,10 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
	return RESUME_GUEST;
 }
-static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
 {
+	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	vcpu->stat.sum_exits++;
@@ -1408,7 +1413,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
			swab32(vcpu->arch.emul_inst) :
			vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
-			r = kvmppc_emulate_debug_inst(run, vcpu);
+			r = kvmppc_emulate_debug_inst(vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
@@ -1460,7 +1465,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
	return r;
 }
-static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 {
	int r;
	int srcu_idx;
@@ -1518,7 +1523,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = kvmhv_nested_page_fault(run, vcpu);
+		r = kvmhv_nested_page_fault(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
@@ -1528,7 +1533,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = kvmhv_nested_page_fault(run, vcpu);
+		r = kvmhv_nested_page_fault(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		break;
@@ -2932,7 +2937,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
-			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+			ret = kvmppc_handle_exit_hv(vcpu,
						    vcpu->arch.run_task);
		vcpu->arch.ret = ret;
@@ -3897,15 +3902,16 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
	return r;
 }
-static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
	int n_ceded, i, r;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v;
	trace_kvmppc_run_vcpu_enter(vcpu);
-	kvm_run->exit_reason = 0;
+	run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);
@@ -3917,7 +3923,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
-	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
@@ -3950,8 +3955,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
			r = kvmhv_setup_mmu(vcpu);
			spin_lock(&vc->lock);
			if (r) {
-				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-				kvm_run->fail_entry.
+				run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				run->fail_entry.
					hardware_entry_failure_reason = 0;
				vcpu->arch.ret = r;
				break;
@@ -3970,7 +3975,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
-				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+				v->run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
@@ -4011,7 +4016,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
-		kvm_run->exit_reason = KVM_EXIT_INTR;
+		run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}
@@ -4022,15 +4027,15 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
		wake_up(&v->arch.cpu_run);
	}
-	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+	trace_kvmppc_run_vcpu_exit(vcpu);
	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
 }
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
-			  struct kvm_vcpu *vcpu, u64 time_limit,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
			  unsigned long lpcr)
 {
+	struct kvm_run *run = vcpu->run;
	int trap, r, pcpu;
	int srcu_idx, lpid;
	struct kvmppc_vcore *vc;
@@ -4039,14 +4044,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
	trace_kvmppc_run_vcpu_enter(vcpu);
-	kvm_run->exit_reason = 0;
+	run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	vc = vcpu->arch.vcore;
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
-	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
@@ -4164,9 +4168,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
	r = RESUME_GUEST;
	if (trap) {
		if (!nested)
-			r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
+			r = kvmppc_handle_exit_hv(vcpu, current);
		else
-			r = kvmppc_handle_nested_exit(kvm_run, vcpu);
+			r = kvmppc_handle_nested_exit(vcpu);
	}
	vcpu->arch.ret = r;
@@ -4176,7 +4180,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
	while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
		if (signal_pending(current)) {
			vcpu->stat.signal_exits++;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
			break;
		}
@@ -4192,13 +4196,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
 done:
	kvmppc_remove_runnable(vc, vcpu);
-	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+	trace_kvmppc_run_vcpu_exit(vcpu);
	return vcpu->arch.ret;
 sigpend:
	vcpu->stat.signal_exits++;
-	kvm_run->exit_reason = KVM_EXIT_INTR;
+	run->exit_reason = KVM_EXIT_INTR;
	vcpu->arch.ret = -EINTR;
 out:
	local_irq_enable();
@@ -4206,8 +4210,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
	goto done;
 }
-static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
	int r;
	int srcu_idx;
	unsigned long ebb_regs[3] = {};	/* shut up GCC */
@@ -4291,10 +4296,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
	 */
	if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
	    !no_mixing_hpt_and_radix)
-		r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
+		r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
					  vcpu->arch.vcore->lpcr);
	else
-		r = kvmppc_run_vcpu(run, vcpu);
+		r = kvmppc_run_vcpu(vcpu);
	if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
	    !(vcpu->arch.shregs.msr & MSR_PR)) {
@@ -4304,7 +4309,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
		kvmppc_core_prepare_to_enter(vcpu);
	} else if (r == RESUME_PAGE_FAULT) {
		srcu_idx = srcu_read_lock(&kvm->srcu);
-		r = kvmppc_book3s_hv_page_fault(run, vcpu,
+		r = kvmppc_book3s_hv_page_fault(vcpu,
			vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	} else if (r == RESUME_PASSTHROUGH) {
@@ -4978,7 +4983,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 }
 /* We don't need to emulate any privileged instructions or dcbz */
-static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance)
 {
	return EMULATE_FAIL;
...
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
			r = RESUME_HOST;
			break;
		}
-		r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
-					  lpcr);
+		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));
	/* save L2 state for return */
@@ -1257,8 +1256,7 @@ static inline int kvmppc_radix_shift_to_level(int shift)
 }
 /* called with gp->tlb_lock held */
-static long int __kvmhv_nested_page_fault(struct kvm_run *run,
-					  struct kvm_vcpu *vcpu,
+static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
 {
	struct kvm *kvm = vcpu->kvm;
@@ -1341,7 +1339,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
		}
		/* passthrough of emulated MMIO case */
-		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
@@ -1416,8 +1414,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
-	if (n_rmap)
-		kfree(n_rmap);
+	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */
@@ -1428,13 +1425,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
	return RESUME_GUEST;
 }
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
 {
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;
	mutex_lock(&gp->tlb_lock);
-	ret = __kvmhv_nested_page_fault(run, vcpu, gp);
+	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
 }
...
@@ -749,6 +749,20 @@ static u64 kvmppc_get_secmem_size(void)
	const __be32 *prop;
	u64 size = 0;
+	/*
+	 * First try the new ibm,secure-memory nodes which supersede the
+	 * secure-memory-ranges property.
+	 * If we found some, no need to read the deprecated ones.
+	 */
+	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
+		prop = of_get_property(np, "reg", &len);
+		if (!prop)
+			continue;
+		size += of_read_number(prop + 2, 2);
+	}
+	if (size)
+		return size;
+
	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;
...
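For context on the new loop above: with two address cells and two size cells, the reg property of an ibm,secure-memory node is four big-endian 32-bit cells, and of_read_number(prop + 2, 2) skips the address cells and combines the last two into the 64-bit range size. A sketch with invented values (not from any real platform):

	/*
	 * Hypothetical node:
	 *
	 *	secure-memory@100000000 {
	 *		compatible = "ibm,secure-memory";
	 *		reg = <0x0 0x100000000 0x0 0x40000000>;	1 GiB at 4 GiB
	 *	};
	 */
	static u64 reg_size_sketch(const __be32 *prop)
	{
		/* Equivalent to of_read_number(prop + 2, 2). */
		return ((u64)be32_to_cpu(prop[2]) << 32) | be32_to_cpu(prop[3]);
	}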
@@ -700,7 +700,7 @@ static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
 {
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -795,7 +795,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
-			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
@@ -808,7 +808,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
-		r = kvmppc_emulate_mmio(run, vcpu);
+		r = kvmppc_emulate_mmio(vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}
@@ -992,7 +992,7 @@ static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
	enum emulation_result er = EMULATE_FAIL;
	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
-		er = kvmppc_emulate_instruction(vcpu->run, vcpu);
+		er = kvmppc_emulate_instruction(vcpu);
	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
@@ -1089,8 +1089,7 @@ static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
	}
 }
-static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
-				  unsigned int exit_nr)
+static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
	enum emulation_result er;
	ulong flags;
@@ -1124,7 +1123,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
	}
	vcpu->stat.emulated_inst_exits++;
-	er = kvmppc_emulate_instruction(run, vcpu);
+	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
@@ -1139,7 +1138,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
@@ -1198,7 +1197,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
-			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+			r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -1248,7 +1247,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
-			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+			r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
@@ -1292,7 +1291,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
-		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+		r = kvmppc_exit_pr_progint(vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
@@ -1370,7 +1369,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
-				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+				r = kvmppc_exit_pr_progint(vcpu, exit_nr);
			else
				r = RESUME_GUEST;
@@ -1825,8 +1824,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
	vfree(vcpu_book3s);
 }
-static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
	int ret;
 #ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1834,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
-		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}
@@ -1861,7 +1861,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	kvmppc_fix_ee_before_entry();
-	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+	ret = __kvmppc_vcpu_run(run, vcpu);
	kvmppc_clear_debug(vcpu);
...
...@@ -729,13 +729,14 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) ...@@ -729,13 +729,14 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
return r; return r;
} }
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run;
int ret, s; int ret, s;
struct debug_reg debug; struct debug_reg debug;
if (!vcpu->arch.sane) { if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL; return -EINVAL;
} }
...@@ -777,7 +778,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -777,7 +778,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.pgdir = vcpu->kvm->mm->pgd; vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
kvmppc_fix_ee_before_entry(); kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu); ret = __kvmppc_vcpu_run(run, vcpu);
/* No need for guest_exit. It's done in handle_exit. /* No need for guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */ We also get here with interrupts enabled. */
...@@ -799,11 +800,11 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -799,11 +800,11 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return ret; return ret;
} }
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) static int emulation_exit(struct kvm_vcpu *vcpu)
{ {
enum emulation_result er; enum emulation_result er;
er = kvmppc_emulate_instruction(run, vcpu); er = kvmppc_emulate_instruction(vcpu);
switch (er) { switch (er) {
case EMULATE_DONE: case EMULATE_DONE:
/* don't overwrite subtypes, just account kvm_stats */ /* don't overwrite subtypes, just account kvm_stats */
...@@ -820,8 +821,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -820,8 +821,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and /* For debugging, encode the failing instruction and
* report it to userspace. */ * report it to userspace. */
run->hw.hardware_exit_reason = ~0ULL << 32; vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
run->hw.hardware_exit_reason |= vcpu->arch.last_inst; vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL); kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST; return RESUME_HOST;
...@@ -833,8 +834,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -833,8 +834,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
} }
static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run;
struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
u32 dbsr = vcpu->arch.dbsr; u32 dbsr = vcpu->arch.dbsr;
...@@ -953,7 +955,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, ...@@ -953,7 +955,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
} }
} }
static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
enum emulation_result emulated, u32 last_inst) enum emulation_result emulated, u32 last_inst)
{ {
switch (emulated) { switch (emulated) {
...@@ -965,8 +967,8 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -965,8 +967,8 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
__func__, vcpu->arch.regs.nip); __func__, vcpu->arch.regs.nip);
/* For debugging, encode the failing instruction and /* For debugging, encode the failing instruction and
* report it to userspace. */ * report it to userspace. */
run->hw.hardware_exit_reason = ~0ULL << 32; vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
run->hw.hardware_exit_reason |= last_inst; vcpu->run->hw.hardware_exit_reason |= last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL); kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST; return RESUME_HOST;
...@@ -1023,7 +1025,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1023,7 +1025,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1; run->ready_for_interrupt_injection = 1;
if (emulated != EMULATE_DONE) { if (emulated != EMULATE_DONE) {
r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst); r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
goto out; goto out;
} }
...@@ -1083,7 +1085,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1083,7 +1085,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break; break;
case BOOKE_INTERRUPT_HV_PRIV: case BOOKE_INTERRUPT_HV_PRIV:
r = emulation_exit(run, vcpu); r = emulation_exit(vcpu);
break; break;
case BOOKE_INTERRUPT_PROGRAM: case BOOKE_INTERRUPT_PROGRAM:
...@@ -1093,7 +1095,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1093,7 +1095,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* We are here because of an SW breakpoint instr, * We are here because of an SW breakpoint instr,
* so lets return to host to handle. * so lets return to host to handle.
*/ */
r = kvmppc_handle_debug(run, vcpu); r = kvmppc_handle_debug(vcpu);
run->exit_reason = KVM_EXIT_DEBUG; run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS); kvmppc_account_exit(vcpu, DEBUG_EXITS);
break; break;
...@@ -1114,7 +1116,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1114,7 +1116,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break; break;
} }
r = emulation_exit(run, vcpu); r = emulation_exit(vcpu);
break; break;
case BOOKE_INTERRUPT_FP_UNAVAIL: case BOOKE_INTERRUPT_FP_UNAVAIL:
...@@ -1281,7 +1283,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1281,7 +1283,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* actually RAM. */ * actually RAM. */
vcpu->arch.paddr_accessed = gpaddr; vcpu->arch.paddr_accessed = gpaddr;
vcpu->arch.vaddr_accessed = eaddr; vcpu->arch.vaddr_accessed = eaddr;
r = kvmppc_emulate_mmio(run, vcpu); r = kvmppc_emulate_mmio(vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS); kvmppc_account_exit(vcpu, MMIO_EXITS);
} }
...@@ -1332,7 +1334,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1332,7 +1334,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
} }
case BOOKE_INTERRUPT_DEBUG: { case BOOKE_INTERRUPT_DEBUG: {
r = kvmppc_handle_debug(run, vcpu); r = kvmppc_handle_debug(vcpu);
if (r == RESUME_HOST) if (r == RESUME_HOST)
run->exit_reason = KVM_EXIT_DEBUG; run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS); kvmppc_account_exit(vcpu, DEBUG_EXITS);
......
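The hunks above all follow the same conversion: exit and debug handlers stop receiving a separate struct kvm_run pointer and reach the run area through vcpu->run instead. Below is a minimal, self-contained userspace sketch of that pattern; the struct layouts and the KVM_EXIT_DEBUG value are simplified stand-ins for illustration, not the real kernel ABI.

/* Hypothetical mock, not kernel code: shows the narrowed handler signature. */
#include <stdio.h>

struct kvm_run { unsigned int exit_reason; };
struct kvm_vcpu { struct kvm_run *run; };

#define KVM_EXIT_DEBUG 4	/* value assumed for illustration only */

/* After the change the handler takes only the vcpu... */
static int handle_debug_exit(struct kvm_vcpu *vcpu)
{
	/* ...and reaches the run area through vcpu->run. */
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	return 0;
}

int main(void)
{
	struct kvm_run run = { 0 };
	struct kvm_vcpu vcpu = { .run = &run };

	handle_debug_exit(&vcpu);
	printf("exit_reason = %u\n", run.exit_reason);
	return 0;
}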
...@@ -70,7 +70,7 @@ void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr); ...@@ -70,7 +70,7 @@ void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance); unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
...@@ -94,16 +94,12 @@ enum int_class { ...@@ -94,16 +94,12 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, extern int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
struct kvm_vcpu *vcpu,
unsigned int inst, int *advance); unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val); ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val); ulong *spr_val);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val); ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
......
...@@ -39,7 +39,7 @@ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) ...@@ -39,7 +39,7 @@ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.csrr1); kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
} }
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance) unsigned int inst, int *advance)
{ {
int emulated = EMULATE_DONE; int emulated = EMULATE_DONE;
......
...@@ -83,16 +83,16 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) ...@@ -83,16 +83,16 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
} }
#endif #endif
static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance) unsigned int inst, int *advance)
{ {
int emulated = EMULATE_DONE; int emulated = EMULATE_DONE;
switch (get_oc(inst)) { switch (get_oc(inst)) {
case EHPRIV_OC_DEBUG: case EHPRIV_OC_DEBUG:
run->exit_reason = KVM_EXIT_DEBUG; vcpu->run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.address = vcpu->arch.regs.nip; vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
run->debug.arch.status = 0; vcpu->run->debug.arch.status = 0;
kvmppc_account_exit(vcpu, DEBUG_EXITS); kvmppc_account_exit(vcpu, DEBUG_EXITS);
emulated = EMULATE_EXIT_USER; emulated = EMULATE_EXIT_USER;
*advance = 0; *advance = 0;
...@@ -125,7 +125,7 @@ static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst, ...@@ -125,7 +125,7 @@ static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
return EMULATE_FAIL; return EMULATE_FAIL;
} }
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance) unsigned int inst, int *advance)
{ {
int emulated = EMULATE_DONE; int emulated = EMULATE_DONE;
...@@ -182,8 +182,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -182,8 +182,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
break; break;
case XOP_EHPRIV: case XOP_EHPRIV:
emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst, emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
advance);
break; break;
default: default:
...@@ -197,7 +196,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -197,7 +196,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
} }
if (emulated == EMULATE_FAIL) if (emulated == EMULATE_FAIL)
emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);
return emulated; return emulated;
} }
......
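The e500 ehpriv path shows the same conversion inside an emulation helper: the debug exit is described entirely through vcpu->run. The sketch below mirrors that flow with simplified mock types (field layout, KVM_EXIT_DEBUG value, and the nip register placement are assumptions for illustration only).

/* Hypothetical userspace mock of the ehpriv debug-report path. */
#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_EXIT_USER, EMULATE_FAIL };

struct kvm_debug_exit_arch { unsigned long address; unsigned long status; };
struct kvm_run {
	unsigned int exit_reason;
	struct { struct kvm_debug_exit_arch arch; } debug;
};
struct kvm_regs { unsigned long nip; };
struct kvm_vcpu_arch { struct kvm_regs regs; };
struct kvm_vcpu { struct kvm_run *run; struct kvm_vcpu_arch arch; };

#define KVM_EXIT_DEBUG 4	/* illustrative value only */

static enum emulation_result emul_ehpriv_debug(struct kvm_vcpu *vcpu, int *advance)
{
	/* Report the trapping instruction address to userspace via vcpu->run. */
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
	vcpu->run->debug.arch.status = 0;
	*advance = 0;		/* do not step past the instruction */
	return EMULATE_EXIT_USER;
}

int main(void)
{
	struct kvm_run run = { 0 };
	struct kvm_vcpu vcpu = { .run = &run };
	int advance = 1;

	vcpu.arch.regs.nip = 0x1000;
	emul_ehpriv_debug(&vcpu, &advance);
	printf("exit=%u addr=0x%lx advance=%d\n",
	       run.exit_reason, run.debug.arch.address, advance);
	return 0;
}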
...@@ -191,7 +191,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) ...@@ -191,7 +191,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
/* XXX Should probably auto-generate instruction decoding for a particular core /* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */ * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{ {
u32 inst; u32 inst;
int rs, rt, sprn; int rs, rt, sprn;
...@@ -270,9 +270,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -270,9 +270,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
* these are illegal instructions. * these are illegal instructions.
*/ */
if (inst == KVMPPC_INST_SW_BREAKPOINT) { if (inst == KVMPPC_INST_SW_BREAKPOINT) {
run->exit_reason = KVM_EXIT_DEBUG; vcpu->run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.status = 0; vcpu->run->debug.arch.status = 0;
run->debug.arch.address = kvmppc_get_pc(vcpu); vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
emulated = EMULATE_EXIT_USER; emulated = EMULATE_EXIT_USER;
advance = 0; advance = 0;
} else } else
...@@ -285,7 +285,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -285,7 +285,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
if (emulated == EMULATE_FAIL) { if (emulated == EMULATE_FAIL) {
emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst, emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
&advance); &advance);
if (emulated == EMULATE_AGAIN) { if (emulated == EMULATE_AGAIN) {
advance = 0; advance = 0;
......
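The emulate.c hunk also narrows the emulate_op callback that the core dispatches through. The sketch below shows only the signature change with mock types; the real callback hangs off vcpu->kvm->arch.kvm_ops and takes more context, so the struct kvm layout here is a deliberate simplification.

/* Hypothetical mock of the narrowed emulate_op callback. */
#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_AGAIN, EMULATE_FAIL };

struct kvm_run { unsigned int exit_reason; };
struct kvm_vcpu;

struct kvmppc_ops {
	enum emulation_result (*emulate_op)(struct kvm_vcpu *vcpu,
					    unsigned int inst, int *advance);
};

struct kvm { struct kvmppc_ops *ops; };	/* simplified: real field is arch.kvm_ops */
struct kvm_vcpu { struct kvm *kvm; struct kvm_run *run; };

static enum emulation_result demo_emulate_op(struct kvm_vcpu *vcpu,
					     unsigned int inst, int *advance)
{
	(void)vcpu; (void)inst;		/* a real backend would decode inst here */
	*advance = 1;
	return EMULATE_DONE;
}

int main(void)
{
	struct kvmppc_ops ops = { .emulate_op = demo_emulate_op };
	struct kvm kvm = { .ops = &ops };
	struct kvm_run run = { 0 };
	struct kvm_vcpu vcpu = { .kvm = &kvm, .run = &run };
	int advance = 0;

	enum emulation_result r = vcpu.kvm->ops->emulate_op(&vcpu, 0x7c0002a6u, &advance);
	printf("result=%d advance=%d\n", r, advance);
	return 0;
}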
...@@ -71,7 +71,6 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) ...@@ -71,7 +71,6 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
*/ */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run;
u32 inst; u32 inst;
enum emulation_result emulated = EMULATE_FAIL; enum emulation_result emulated = EMULATE_FAIL;
int advance = 1; int advance = 1;
...@@ -104,10 +103,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -104,10 +103,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
int instr_byte_swap = op.type & BYTEREV; int instr_byte_swap = op.type & BYTEREV;
if (op.type & SIGNEXT) if (op.type & SIGNEXT)
emulated = kvmppc_handle_loads(run, vcpu, emulated = kvmppc_handle_loads(vcpu,
op.reg, size, !instr_byte_swap); op.reg, size, !instr_byte_swap);
else else
emulated = kvmppc_handle_load(run, vcpu, emulated = kvmppc_handle_load(vcpu,
op.reg, size, !instr_byte_swap); op.reg, size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
...@@ -124,10 +123,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -124,10 +123,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1; vcpu->arch.mmio_sp64_extend = 1;
if (op.type & SIGNEXT) if (op.type & SIGNEXT)
emulated = kvmppc_handle_loads(run, vcpu, emulated = kvmppc_handle_loads(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1); KVM_MMIO_REG_FPR|op.reg, size, 1);
else else
emulated = kvmppc_handle_load(run, vcpu, emulated = kvmppc_handle_load(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1); KVM_MMIO_REG_FPR|op.reg, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
...@@ -164,12 +163,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -164,12 +163,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) { if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2; vcpu->arch.mmio_vmx_copy_nums = 2;
emulated = kvmppc_handle_vmx_load(run, emulated = kvmppc_handle_vmx_load(vcpu,
vcpu, KVM_MMIO_REG_VMX|op.reg, KVM_MMIO_REG_VMX|op.reg,
8, 1); 8, 1);
} else { } else {
vcpu->arch.mmio_vmx_copy_nums = 1; vcpu->arch.mmio_vmx_copy_nums = 1;
emulated = kvmppc_handle_vmx_load(run, vcpu, emulated = kvmppc_handle_vmx_load(vcpu,
KVM_MMIO_REG_VMX|op.reg, KVM_MMIO_REG_VMX|op.reg,
size, 1); size, 1);
} }
...@@ -217,7 +216,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -217,7 +216,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size; io_size_each = op.element_size;
} }
emulated = kvmppc_handle_vsx_load(run, vcpu, emulated = kvmppc_handle_vsx_load(vcpu,
KVM_MMIO_REG_VSX|op.reg, io_size_each, KVM_MMIO_REG_VSX|op.reg, io_size_each,
1, op.type & SIGNEXT); 1, op.type & SIGNEXT);
break; break;
...@@ -227,8 +226,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -227,8 +226,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* if need byte reverse, op.val has been reversed by /* if need byte reverse, op.val has been reversed by
* analyse_instr(). * analyse_instr().
*/ */
emulated = kvmppc_handle_store(run, vcpu, op.val, emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea); kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
...@@ -250,7 +248,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -250,7 +248,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (op.type & FPCONV) if (op.type & FPCONV)
vcpu->arch.mmio_sp64_extend = 1; vcpu->arch.mmio_sp64_extend = 1;
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(vcpu,
VCPU_FPR(vcpu, op.reg), size, 1); VCPU_FPR(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
...@@ -290,12 +288,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -290,12 +288,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) { if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2; vcpu->arch.mmio_vmx_copy_nums = 2;
emulated = kvmppc_handle_vmx_store(run, emulated = kvmppc_handle_vmx_store(vcpu,
vcpu, op.reg, 8, 1); op.reg, 8, 1);
} else { } else {
vcpu->arch.mmio_vmx_copy_nums = 1; vcpu->arch.mmio_vmx_copy_nums = 1;
emulated = kvmppc_handle_vmx_store(run, emulated = kvmppc_handle_vmx_store(vcpu,
vcpu, op.reg, size, 1); op.reg, size, 1);
} }
break; break;
...@@ -338,7 +336,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -338,7 +336,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size; io_size_each = op.element_size;
} }
emulated = kvmppc_handle_vsx_store(run, vcpu, emulated = kvmppc_handle_vsx_store(vcpu,
op.reg, io_size_each, 1); op.reg, io_size_each, 1);
break; break;
} }
......
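In the load/store emulation hunks the copy loops now advance paddr_accessed by vcpu->run->mmio.len rather than through a passed-in run pointer. A minimal sketch of that loop shape follows; the helper, field names, and single-element bookkeeping are mocked for illustration and always succeed, unlike the real MMIO path.

/* Hypothetical mock of the VSX-style copy loop after the change. */
#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_DO_MMIO, EMULATE_FAIL };

struct kvm_run { struct { unsigned int len; } mmio; };
struct kvm_vcpu_arch {
	unsigned long paddr_accessed;
	int mmio_vsx_copy_nums;
	int mmio_vsx_offset;
};
struct kvm_vcpu { struct kvm_run *run; struct kvm_vcpu_arch arch; };

/* Stand-in for __kvmppc_handle_load(); always succeeds in this mock. */
static enum emulation_result handle_one_load(struct kvm_vcpu *vcpu,
					     unsigned int rt, unsigned int bytes)
{
	(void)rt;
	vcpu->run->mmio.len = bytes;	/* pretend the MMIO covered 'bytes' */
	return EMULATE_DONE;
}

static enum emulation_result handle_vsx_load(struct kvm_vcpu *vcpu,
					     unsigned int rt, unsigned int bytes)
{
	enum emulation_result emulated = EMULATE_DONE;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = handle_one_load(vcpu, rt, bytes);
		if (emulated != EMULATE_DONE)
			break;
		/* Advance by the length recorded in the run area. */
		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}

int main(void)
{
	struct kvm_run run = { 0 };
	struct kvm_vcpu vcpu = { .run = &run };

	vcpu.arch.mmio_vsx_copy_nums = 2;
	handle_vsx_load(&vcpu, 0, 8);
	printf("paddr_accessed=%lu offset=%d\n",
	       vcpu.arch.paddr_accessed, vcpu.arch.mmio_vsx_offset);
	return 0;
}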
...@@ -279,7 +279,7 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu) ...@@ -279,7 +279,7 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
} }
EXPORT_SYMBOL_GPL(kvmppc_sanity_check); EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{ {
enum emulation_result er; enum emulation_result er;
int r; int r;
...@@ -295,7 +295,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -295,7 +295,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
case EMULATE_DO_MMIO: case EMULATE_DO_MMIO:
run->exit_reason = KVM_EXIT_MMIO; vcpu->run->exit_reason = KVM_EXIT_MMIO;
/* We must reload nonvolatiles because "update" load/store /* We must reload nonvolatiles because "update" load/store
* instructions modify register state. */ * instructions modify register state. */
/* Future optimization: only reload non-volatiles if they were /* Future optimization: only reload non-volatiles if they were
...@@ -1107,9 +1107,9 @@ static inline u32 dp_to_sp(u64 fprd) ...@@ -1107,9 +1107,9 @@ static inline u32 dp_to_sp(u64 fprd)
#define dp_to_sp(x) (x) #define dp_to_sp(x) (x)
#endif /* CONFIG_PPC_FPU */ #endif /* CONFIG_PPC_FPU */
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
struct kvm_run *run)
{ {
struct kvm_run *run = vcpu->run;
u64 uninitialized_var(gpr); u64 uninitialized_var(gpr);
if (run->mmio.len > sizeof(gpr)) { if (run->mmio.len > sizeof(gpr)) {
...@@ -1219,10 +1219,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, ...@@ -1219,10 +1219,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
} }
} }
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, unsigned int rt, unsigned int bytes,
int is_default_endian, int sign_extend) int is_default_endian, int sign_extend)
{ {
struct kvm_run *run = vcpu->run;
int idx, ret; int idx, ret;
bool host_swabbed; bool host_swabbed;
...@@ -1256,7 +1257,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1256,7 +1257,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
srcu_read_unlock(&vcpu->kvm->srcu, idx); srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret) { if (!ret) {
kvmppc_complete_mmio_load(vcpu, run); kvmppc_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0; vcpu->mmio_needed = 0;
return EMULATE_DONE; return EMULATE_DONE;
} }
...@@ -1264,24 +1265,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1264,24 +1265,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO; return EMULATE_DO_MMIO;
} }
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, unsigned int rt, unsigned int bytes,
int is_default_endian) int is_default_endian)
{ {
return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
} }
EXPORT_SYMBOL_GPL(kvmppc_handle_load); EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */ /* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, unsigned int rt, unsigned int bytes,
int is_default_endian) int is_default_endian)
{ {
return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
} }
#ifdef CONFIG_VSX #ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend) int is_default_endian, int mmio_sign_extend)
{ {
...@@ -1292,13 +1293,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1292,13 +1293,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL; return EMULATE_FAIL;
while (vcpu->arch.mmio_vsx_copy_nums) { while (vcpu->arch.mmio_vsx_copy_nums) {
emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, mmio_sign_extend); is_default_endian, mmio_sign_extend);
if (emulated != EMULATE_DONE) if (emulated != EMULATE_DONE)
break; break;
vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++; vcpu->arch.mmio_vsx_offset++;
...@@ -1307,9 +1308,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1307,9 +1308,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
} }
#endif /* CONFIG_VSX */ #endif /* CONFIG_VSX */
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian) u64 val, unsigned int bytes, int is_default_endian)
{ {
struct kvm_run *run = vcpu->run;
void *data = run->mmio.data; void *data = run->mmio.data;
int idx, ret; int idx, ret;
bool host_swabbed; bool host_swabbed;
...@@ -1423,7 +1425,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) ...@@ -1423,7 +1425,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
return result; return result;
} }
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes, int is_default_endian) int rs, unsigned int bytes, int is_default_endian)
{ {
u64 val; u64 val;
...@@ -1439,13 +1441,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1439,13 +1441,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
return EMULATE_FAIL; return EMULATE_FAIL;
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(vcpu,
val, bytes, is_default_endian); val, bytes, is_default_endian);
if (emulated != EMULATE_DONE) if (emulated != EMULATE_DONE)
break; break;
vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++; vcpu->arch.mmio_vsx_offset++;
...@@ -1454,19 +1456,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1454,19 +1456,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated; return emulated;
} }
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
struct kvm_run *run)
{ {
struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL; enum emulation_result emulated = EMULATE_FAIL;
int r; int r;
vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) { if (!vcpu->mmio_is_write) {
emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
run->mmio.len, 1, vcpu->arch.mmio_sign_extend); run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
} else { } else {
emulated = kvmppc_handle_vsx_store(run, vcpu, emulated = kvmppc_handle_vsx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1); vcpu->arch.io_gpr, run->mmio.len, 1);
} }
...@@ -1490,7 +1492,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, ...@@ -1490,7 +1492,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
#endif /* CONFIG_VSX */ #endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian) unsigned int rt, unsigned int bytes, int is_default_endian)
{ {
enum emulation_result emulated = EMULATE_DONE; enum emulation_result emulated = EMULATE_DONE;
...@@ -1499,13 +1501,13 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1499,13 +1501,13 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL; return EMULATE_FAIL;
while (vcpu->arch.mmio_vmx_copy_nums) { while (vcpu->arch.mmio_vmx_copy_nums) {
emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, 0); is_default_endian, 0);
if (emulated != EMULATE_DONE) if (emulated != EMULATE_DONE)
break; break;
vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++; vcpu->arch.mmio_vmx_offset++;
} }
...@@ -1585,7 +1587,7 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) ...@@ -1585,7 +1587,7 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
return result; return result;
} }
int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian) unsigned int rs, unsigned int bytes, int is_default_endian)
{ {
u64 val = 0; u64 val = 0;
...@@ -1620,12 +1622,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1620,12 +1622,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL; return EMULATE_FAIL;
} }
emulated = kvmppc_handle_store(run, vcpu, val, bytes, emulated = kvmppc_handle_store(vcpu, val, bytes,
is_default_endian); is_default_endian);
if (emulated != EMULATE_DONE) if (emulated != EMULATE_DONE)
break; break;
vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++; vcpu->arch.mmio_vmx_offset++;
} }
...@@ -1633,19 +1635,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1633,19 +1635,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated; return emulated;
} }
static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
struct kvm_run *run)
{ {
struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL; enum emulation_result emulated = EMULATE_FAIL;
int r; int r;
vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) { if (!vcpu->mmio_is_write) {
emulated = kvmppc_handle_vmx_load(run, vcpu, emulated = kvmppc_handle_vmx_load(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1); vcpu->arch.io_gpr, run->mmio.len, 1);
} else { } else {
emulated = kvmppc_handle_vmx_store(run, vcpu, emulated = kvmppc_handle_vmx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1); vcpu->arch.io_gpr, run->mmio.len, 1);
} }
...@@ -1775,7 +1777,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -1775,7 +1777,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (vcpu->mmio_needed) { if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0; vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write) if (!vcpu->mmio_is_write)
kvmppc_complete_mmio_load(vcpu, run); kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX #ifdef CONFIG_VSX
if (vcpu->arch.mmio_vsx_copy_nums > 0) { if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_copy_nums--;
...@@ -1783,7 +1785,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -1783,7 +1785,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
} }
if (vcpu->arch.mmio_vsx_copy_nums > 0) { if (vcpu->arch.mmio_vsx_copy_nums > 0) {
r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
if (r == RESUME_HOST) { if (r == RESUME_HOST) {
vcpu->mmio_needed = 1; vcpu->mmio_needed = 1;
goto out; goto out;
...@@ -1797,7 +1799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -1797,7 +1799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
} }
if (vcpu->arch.mmio_vmx_copy_nums > 0) { if (vcpu->arch.mmio_vmx_copy_nums > 0) {
r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
if (r == RESUME_HOST) { if (r == RESUME_HOST) {
vcpu->mmio_needed = 1; vcpu->mmio_needed = 1;
goto out; goto out;
...@@ -1830,7 +1832,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -1830,7 +1832,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (run->immediate_exit) if (run->immediate_exit)
r = -EINTR; r = -EINTR;
else else
r = kvmppc_vcpu_run(run, vcpu); r = kvmppc_vcpu_run(vcpu);
kvm_sigset_deactivate(vcpu); kvm_sigset_deactivate(vcpu);
......
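Functions in powerpc.c that touch the run area many times keep the old-looking body by taking a local pointer from vcpu->run at the top, as kvmppc_complete_mmio_load and kvmppc_handle_store now do. The sketch below illustrates that convention with mock types; the struct fields and byte values are assumptions, not the real uapi layout.

/* Hypothetical mock: local 'run' pointer replaces the old parameter. */
#include <stdio.h>
#include <string.h>

struct kvm_run {
	unsigned int exit_reason;
	struct { unsigned int len; unsigned char data[8]; } mmio;
};
struct kvm_vcpu { struct kvm_run *run; int mmio_needed; };

static void complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;	/* was a parameter before */
	unsigned long gpr = 0;

	if (run->mmio.len > sizeof(gpr)) {
		printf("bad MMIO length: %u\n", run->mmio.len);
		return;
	}
	memcpy(&gpr, run->mmio.data, run->mmio.len);
	printf("loaded 0x%lx (%u bytes)\n", gpr, run->mmio.len);
}

int main(void)
{
	struct kvm_run run = { .mmio = { .len = 4, .data = { 0x78, 0x56, 0x34, 0x12 } } };
	struct kvm_vcpu vcpu = { .run = &run, .mmio_needed = 1 };

	if (vcpu.mmio_needed) {
		vcpu.mmio_needed = 0;
		complete_mmio_load(&vcpu);
	}
	return 0;
}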
...@@ -472,9 +472,9 @@ TRACE_EVENT(kvmppc_run_vcpu_enter, ...@@ -472,9 +472,9 @@ TRACE_EVENT(kvmppc_run_vcpu_enter,
); );
TRACE_EVENT(kvmppc_run_vcpu_exit, TRACE_EVENT(kvmppc_run_vcpu_exit,
TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run), TP_PROTO(struct kvm_vcpu *vcpu),
TP_ARGS(vcpu, run), TP_ARGS(vcpu),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, vcpu_id) __field(int, vcpu_id)
...@@ -484,7 +484,7 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, ...@@ -484,7 +484,7 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
TP_fast_assign( TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id; __entry->vcpu_id = vcpu->vcpu_id;
__entry->exit = run->exit_reason; __entry->exit = vcpu->run->exit_reason;
__entry->ret = vcpu->arch.ret; __entry->ret = vcpu->arch.ret;
), ),
......
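The tracepoint change follows suit: with kvm_run always reachable from the vcpu, the exit event only needs the vcpu in its prototype. The sketch below is a plain function, not a real TRACE_EVENT, and the exit value is arbitrary; it only demonstrates where the fields now come from.

/* Hypothetical mock of the narrowed run_vcpu_exit event. */
#include <stdio.h>

struct kvm_run { unsigned int exit_reason; };
struct kvm_vcpu { int vcpu_id; struct kvm_run *run; struct { int ret; } arch; };

static void trace_run_vcpu_exit(struct kvm_vcpu *vcpu)
{
	/* exit_reason is read from vcpu->run instead of a separate argument */
	printf("vcpu=%d exit=%u ret=%d\n",
	       vcpu->vcpu_id, vcpu->run->exit_reason, vcpu->arch.ret);
}

int main(void)
{
	struct kvm_run run = { .exit_reason = 2 };	/* arbitrary value */
	struct kvm_vcpu vcpu = { .vcpu_id = 0, .run = &run, .arch = { .ret = 0 } };

	trace_run_vcpu_exit(&vcpu);
	return 0;
}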