Commit 4f4af841 authored by Paolo Bonzini

Merge tag 'kvm-ppc-next-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

KVM PPC update for 5.7

* Add a capability for enabling secure guests under the Protected
  Execution Framework ultravisor

* Various bug fixes and cleanups.
parents cf39d375 9a5788c6
@@ -5785,6 +5785,23 @@ it hard or impossible to use it correctly. The availability of
 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
 Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
 
+7.19 KVM_CAP_PPC_SECURE_GUEST
+------------------------------
+
+:Architectures: ppc
+
+This capability indicates that KVM is running on a host that has
+ultravisor firmware and thus can support a secure guest. On such a
+system, a guest can ask the ultravisor to make it a secure guest,
+one whose memory is inaccessible to the host except for pages which
+are explicitly requested to be shared with the host. The ultravisor
+notifies KVM when a guest requests to become a secure guest, and KVM
+has the opportunity to veto the transition.
+
+If present, this capability can be enabled for a VM, meaning that KVM
+will allow the transition to secure guest mode. Otherwise KVM will
+veto the transition.
+
 8. Other capabilities.
 ======================
......
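Note (illustrative, not part of this commit): the 7.19 section above describes the capability from the host side. A minimal userspace sketch of probing and enabling it on an already-created VM file descriptor "vmfd" could look like the following; error handling is kept to the bare minimum.

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int enable_secure_guest(int vmfd)
    {
            struct kvm_enable_cap cap;

            /* Returns 1 only when the host has ultravisor firmware (see 7.19). */
            if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SECURE_GUEST) != 1)
                    return -1;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_PPC_SECURE_GUEST;
            /* Once enabled, KVM will allow this VM's transition to secure mode. */
            return ioctl(vmfd, KVM_ENABLE_CAP, &cap);
    }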
@@ -150,4 +150,7 @@
 #define KVM_INST_FETCH_FAILED -1
 
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
 #endif /* __POWERPC_KVM_ASM_H__ */
@@ -5,6 +5,7 @@
 #ifdef CONFIG_PPC_UV
 int kvmppc_uvmem_init(void);
 void kvmppc_uvmem_free(void);
+bool kvmppc_uvmem_available(void);
 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
 void kvmppc_uvmem_slot_free(struct kvm *kvm,
                             const struct kvm_memory_slot *slot);
@@ -30,6 +31,11 @@ static inline int kvmppc_uvmem_init(void)
 static inline void kvmppc_uvmem_free(void) { }
 
+static inline bool kvmppc_uvmem_available(void)
+{
+        return false;
+}
+
 static inline int
 kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
 {
......
@@ -303,6 +303,7 @@ struct kvm_arch {
         u8 radix;
         u8 fwnmi_enabled;
         u8 secure_guest;
+        u8 svm_enabled;
         bool threads_indep;
         bool nested_enable;
         pgd_t *pgtable;
......
@@ -107,8 +107,6 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                        unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
 extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
 extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -289,7 +287,6 @@ struct kvmppc_ops {
        int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
-       void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *slot);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
@@ -316,6 +313,7 @@ struct kvmppc_ops {
                              int size);
        int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
                              int size);
+       int (*enable_svm)(struct kvm *kvm);
        int (*svm_off)(struct kvm *kvm);
 };
......
@@ -858,11 +858,6 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        return 0;
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-       vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
......
@@ -16,6 +16,7 @@ extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
 extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
 
+extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                      unsigned int inst, int *advance);
......
@@ -234,7 +234,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
        case 2:
        case 6:
                pte->may_write = true;
-               /* fall through */
+               fallthrough;
        case 3:
        case 5:
        case 7:
......
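Note (illustrative, not part of this commit): several hunks in this series replace "/* fall through */" comments with the fallthrough pseudo-keyword, which the kernel defines as a compiler attribute where the toolchain supports it, so intentional case fall-through stays visible to -Wimplicit-fallthrough. A standalone approximation, with the macro defined locally for the demo:

    /* Standalone approximation of the kernel's fallthrough macro (GCC >= 7 / Clang). */
    #include <stdio.h>

    #define fallthrough __attribute__((__fallthrough__))

    static int classify(int prio)
    {
            int flags = 0;

            switch (prio) {
            case 1:
                    flags |= 2;     /* e.g. an "update_dear"-style flag */
                    fallthrough;    /* intentional: also run the next case */
            case 2:
                    flags |= 1;
                    break;
            }
            return flags;
    }

    int main(void)
    {
            printf("%d %d\n", classify(1), classify(2));   /* prints "3 1" */
            return 0;
    }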
@@ -356,7 +356,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 /* From mm/mmu_context_hash32.c */
 #define CTX_TO_VSID(c, id)    ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
 
-int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
......
@@ -311,7 +311,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        case 2:
        case 6:
                gpte->may_write = true;
-               /* fall through */
+               fallthrough;
        case 3:
        case 5:
        case 7:
......
@@ -384,7 +384,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
        __destroy_context(to_book3s(vcpu)->context_id[0]);
 }
 
-int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
......
@@ -485,18 +485,18 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        __be64 *hptep;
        unsigned long mmu_seq, psize, pte_size;
        unsigned long gpa_base, gfn_base;
-       unsigned long gpa, gfn, hva, pfn;
+       unsigned long gpa, gfn, hva, pfn, hpa;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        struct revmap_entry *rev;
-       struct page *page, *pages[1];
-       long index, ret, npages;
+       struct page *page;
+       long index, ret;
        bool is_ci;
-       unsigned int writing, write_ok;
-       struct vm_area_struct *vma;
+       bool writing, write_ok;
+       unsigned int shift;
        unsigned long rcbits;
        long mmio_update;
-       struct mm_struct *mm;
+       pte_t pte, *ptep;
 
        if (kvm_is_radix(kvm))
                return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
@@ -570,59 +570,62 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        smp_rmb();
 
        ret = -EFAULT;
-       is_ci = false;
-       pfn = 0;
        page = NULL;
-       mm = kvm->mm;
-       pte_size = PAGE_SIZE;
        writing = (dsisr & DSISR_ISSTORE) != 0;
        /* If writing != 0, then the HPTE must allow writing, if we get here */
        write_ok = writing;
        hva = gfn_to_hva_memslot(memslot, gfn);
-       npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
-       if (npages < 1) {
-               /* Check if it's an I/O mapping */
-               down_read(&mm->mmap_sem);
-               vma = find_vma(mm, hva);
-               if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
-                   (vma->vm_flags & VM_PFNMAP)) {
-                       pfn = vma->vm_pgoff +
-                               ((hva - vma->vm_start) >> PAGE_SHIFT);
-                       pte_size = psize;
-                       is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
-                       write_ok = vma->vm_flags & VM_WRITE;
-               }
-               up_read(&mm->mmap_sem);
-               if (!pfn)
-                       goto out_put;
-       } else {
-               page = pages[0];
-               pfn = page_to_pfn(page);
-               if (PageHuge(page)) {
-                       page = compound_head(page);
-                       pte_size <<= compound_order(page);
-               }
-               /* if the guest wants write access, see if that is OK */
-               if (!writing && hpte_is_writable(r)) {
-                       pte_t *ptep, pte;
-                       unsigned long flags;
-                       /*
-                        * We need to protect against page table destruction
-                        * hugepage split and collapse.
-                        */
-                       local_irq_save(flags);
-                       ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
-                       if (ptep) {
-                               pte = kvmppc_read_update_linux_pte(ptep, 1);
-                               if (__pte_write(pte))
-                                       write_ok = 1;
-                       }
-                       local_irq_restore(flags);
-               }
-       }
+
+       /*
+        * Do a fast check first, since __gfn_to_pfn_memslot doesn't
+        * do it with !atomic && !async, which is how we call it.
+        * We always ask for write permission since the common case
+        * is that the page is writable.
+        */
+       if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
+               write_ok = true;
+       } else {
+               /* Call KVM generic code to do the slow-path check */
+               pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+                                          writing, &write_ok);
+               if (is_error_noslot_pfn(pfn))
+                       return -EFAULT;
+               page = NULL;
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+                       if (PageReserved(page))
+                               page = NULL;
+               }
+       }
+
+       /*
+        * Read the PTE from the process' radix tree and use that
+        * so we get the shift and attribute bits.
+        */
+       local_irq_disable();
+       ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+       /*
+        * If the PTE disappeared temporarily due to a THP
+        * collapse, just return and let the guest try again.
+        */
+       if (!ptep) {
+               local_irq_enable();
+               if (page)
+                       put_page(page);
+               return RESUME_GUEST;
+       }
+       pte = *ptep;
+       local_irq_enable();
+       hpa = pte_pfn(pte) << PAGE_SHIFT;
+       pte_size = PAGE_SIZE;
+       if (shift)
+               pte_size = 1ul << shift;
+       is_ci = pte_ci(pte);
 
        if (psize > pte_size)
                goto out_put;
+       if (pte_size > psize)
+               hpa |= hva & (pte_size - psize);
 
        /* Check WIMG vs. the actual page we're accessing */
        if (!hpte_cache_flags_ok(r, is_ci)) {
@@ -636,14 +639,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        }
 
        /*
-        * Set the HPTE to point to pfn.
-        * Since the pfn is at PAGE_SIZE granularity, make sure we
+        * Set the HPTE to point to hpa.
+        * Since the hpa is at PAGE_SIZE granularity, make sure we
         * don't mask out lower-order bits if psize < PAGE_SIZE.
         */
        if (psize < PAGE_SIZE)
                psize = PAGE_SIZE;
-       r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
-                       ((pfn << PAGE_SHIFT) & ~(psize - 1));
+       r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);
        ret = RESUME_GUEST;
@@ -708,20 +710,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        asm volatile("ptesync" : : : "memory");
        preempt_enable();
        if (page && hpte_is_writable(r))
-               SetPageDirty(page);
+               set_page_dirty_lock(page);
 
  out_put:
        trace_kvm_page_fault_exit(vcpu, hpte, ret);
-       if (page) {
-               /*
-                * We drop pages[0] here, not page because page might
-                * have been set to the head page of a compound, but
-                * we have to drop the reference on the correct tail
-                * page to match the get inside gup()
-                */
-               put_page(pages[0]);
-       }
+       if (page)
+               put_page(page);
        return ret;
 
  out_unlock:
......
@@ -425,7 +425,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
                                  unsigned int lpid)
 {
        if (full) {
-               memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+               memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
        } else {
                pte_t *p = pte;
                unsigned long it;
......
@@ -1073,24 +1073,34 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                                        kvmppc_get_gpr(vcpu, 6));
                break;
        case H_SVM_PAGE_IN:
+               ret = H_UNSUPPORTED;
+               if (kvmppc_get_srr1(vcpu) & MSR_S)
                        ret = kvmppc_h_svm_page_in(vcpu->kvm,
                                                   kvmppc_get_gpr(vcpu, 4),
                                                   kvmppc_get_gpr(vcpu, 5),
                                                   kvmppc_get_gpr(vcpu, 6));
                break;
        case H_SVM_PAGE_OUT:
+               ret = H_UNSUPPORTED;
+               if (kvmppc_get_srr1(vcpu) & MSR_S)
                        ret = kvmppc_h_svm_page_out(vcpu->kvm,
                                                    kvmppc_get_gpr(vcpu, 4),
                                                    kvmppc_get_gpr(vcpu, 5),
                                                    kvmppc_get_gpr(vcpu, 6));
                break;
        case H_SVM_INIT_START:
+               ret = H_UNSUPPORTED;
+               if (kvmppc_get_srr1(vcpu) & MSR_S)
                        ret = kvmppc_h_svm_init_start(vcpu->kvm);
                break;
        case H_SVM_INIT_DONE:
+               ret = H_UNSUPPORTED;
+               if (kvmppc_get_srr1(vcpu) & MSR_S)
                        ret = kvmppc_h_svm_init_done(vcpu->kvm);
                break;
        case H_SVM_INIT_ABORT:
+               ret = H_UNSUPPORTED;
+               if (kvmppc_get_srr1(vcpu) & MSR_S)
                        ret = kvmppc_h_svm_init_abort(vcpu->kvm);
                break;
@@ -3615,6 +3625,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
                    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
                        kvmppc_nested_cede(vcpu);
+                       kvmppc_set_gpr(vcpu, 3, 0);
                        trap = 0;
                }
        } else {
@@ -4554,11 +4565,6 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
        }
 }
 
-static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
-{
-       return;
-}
-
 void kvmppc_setup_partition_table(struct kvm *kvm)
 {
        unsigned long dw0, dw1;
@@ -5422,6 +5428,21 @@ static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
        vpa->update_pending = 0;
 }
 
+/*
+ * Enable a guest to become a secure VM, or test whether
+ * that could be enabled.
+ * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
+ * tested (kvm == NULL) or enabled (kvm != NULL).
+ */
+static int kvmhv_enable_svm(struct kvm *kvm)
+{
+       if (!kvmppc_uvmem_available())
+               return -EINVAL;
+       if (kvm)
+               kvm->arch.svm_enabled = 1;
+       return 0;
+}
+
 /*
  * IOCTL handler to turn off secure mode of guest
  *
@@ -5522,7 +5543,6 @@ static struct kvmppc_ops kvm_ops_hv = {
        .age_hva = kvm_age_hva_hv,
        .test_age_hva = kvm_test_age_hva_hv,
        .set_spte_hva = kvm_set_spte_hva_hv,
-       .mmu_destroy = kvmppc_mmu_destroy_hv,
        .free_memslot = kvmppc_core_free_memslot_hv,
        .init_vm = kvmppc_core_init_vm_hv,
        .destroy_vm = kvmppc_core_destroy_vm_hv,
@@ -5543,6 +5563,7 @@ static struct kvmppc_ops kvm_ops_hv = {
        .enable_nested = kvmhv_enable_nested,
        .load_from_eaddr = kvmhv_load_from_eaddr,
        .store_to_eaddr = kvmhv_store_to_eaddr,
+       .enable_svm = kvmhv_enable_svm,
        .svm_off = kvmhv_svm_off,
 };
......
@@ -3,6 +3,8 @@
  * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        u64 newmsr, bescr;
        int ra, rs;
 
-       switch (instr & 0xfc0007ff) {
+       /*
+        * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+        * in these instructions, so masking bit 31 out doesn't change these
+        * instructions. For treclaim., tsr., and trechkpt. instructions if bit
+        * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
+        * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
+        * 31 is an acceptable way to handle these invalid forms that have
+        * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
+        * bit 31 set) can generate a softpatch interrupt. Hence both forms
+        * are handled below for these instructions so they behave the same way.
+        */
+       switch (instr & PO_XOP_OPCODE_MASK) {
        case PPC_INST_RFID:
                /* XXX do we need to check for PR=0 here? */
                newmsr = vcpu->arch.shregs.srr1;
@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.shregs.msr = newmsr;
                return RESUME_GUEST;
 
-       case PPC_INST_TSR:
+       /* ignore bit 31, see comment above */
+       case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
                /* check for PR=1 and arch 2.06 bit set in PCR */
                if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
                        /* generate an illegal instruction interrupt */
@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.shregs.msr = msr;
                return RESUME_GUEST;
 
-       case PPC_INST_TRECLAIM:
+       /* ignore bit 31, see comment above */
+       case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
                        /* generate an illegal instruction interrupt */
@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
                return RESUME_GUEST;
 
-       case PPC_INST_TRECHKPT:
+       /* ignore bit 31, see comment above */
+       case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
                /* XXX do we need to check for PR=0 here? */
                /* check for TM disabled in the HFSCR or MSR */
                if (!(vcpu->arch.hfscr & HFSCR_TM)) {
@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
        }
 
        /* What should we do here? We didn't recognize the instruction */
-       WARN_ON_ONCE(1);
+       kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+       pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
+
        return RESUME_GUEST;
 }
@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
        u64 newmsr, msr, bescr;
        int rs;
 
-       switch (instr & 0xfc0007ff) {
+       /*
+        * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+        * in these instructions, so masking bit 31 out doesn't change these
+        * instructions. For the tsr. instruction if bit 31 = 0 then it is per
+        * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
+        * Forms, informs specifically that ignoring bit 31 is an acceptable way
+        * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
+        * for emulation purposes both forms (w/ and wo/ bit 31 set) can
+        * generate a softpatch interrupt. Hence both forms are handled below
+        * for tsr. to make them behave the same way.
+        */
+       switch (instr & PO_XOP_OPCODE_MASK) {
        case PPC_INST_RFID:
                /* XXX do we need to check for PR=0 here? */
                newmsr = vcpu->arch.shregs.srr1;
@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
                vcpu->arch.shregs.msr = newmsr;
                return 1;
 
-       case PPC_INST_TSR:
+       /* ignore bit 31, see comment above */
+       case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
                /* we know the MSR has the TS field = S (0b01) here */
                msr = vcpu->arch.shregs.msr;
                /* check for PR=1 and arch 2.06 bit set in PCR */
......
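Note (illustrative, not part of this commit): the comments above rely on PO_XOP_OPCODE_MASK (0xfc0007fe, added to kvm_asm.h earlier in this series) keeping only the primary opcode (bits 0-5) and the extended opcode (bits 21-30) of an instruction word while clearing bit 31, so a TM instruction encoded with bit 31 = 0 lands in the same switch case as its canonical form. A standalone sketch with a made-up encoding (the kernel compares against PPC_INST_TSR and friends):

    #include <stdint.h>
    #include <stdio.h>

    #define PO_XOP_OPCODE_MASK 0xfc0007feu

    int main(void)
    {
            /* Hypothetical instruction word; only the masking behaviour matters. */
            uint32_t canonical = 0x7c0005ddu;          /* bit 31 (the lsb) set */
            uint32_t invalid_form = canonical & ~1u;   /* same opcode, bit 31 clear */

            /* Both forms compare equal once the mask is applied. */
            printf("%08x %08x\n",
                   canonical & PO_XOP_OPCODE_MASK,
                   invalid_form & PO_XOP_OPCODE_MASK);
            return 0;
    }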
@@ -113,6 +113,15 @@ struct kvmppc_uvmem_page_pvt {
        bool skip_page_out;
 };
 
+bool kvmppc_uvmem_available(void)
+{
+       /*
+        * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
+        * and our data structures have been initialized successfully.
+        */
+       return !!kvmppc_uvmem_bitmap;
+}
+
 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
 {
        struct kvmppc_uvmem_slot *p;
@@ -209,6 +218,8 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
        int ret = H_SUCCESS;
        int srcu_idx;
 
+       kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
+
        if (!kvmppc_uvmem_bitmap)
                return H_UNSUPPORTED;
@@ -216,6 +227,10 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
        if (!kvm_is_radix(kvm))
                return H_UNSUPPORTED;
 
+       /* NAK the transition to secure if not enabled */
+       if (!kvm->arch.svm_enabled)
+               return H_AUTHORITY;
+
        srcu_idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
@@ -233,7 +248,6 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
                        goto out;
                }
        }
-       kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
 out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
@@ -806,6 +820,9 @@ int kvmppc_uvmem_init(void)
 void kvmppc_uvmem_free(void)
 {
+       if (!kvmppc_uvmem_bitmap)
+               return;
+
        memunmap_pages(&kvmppc_uvmem_pgmap);
        release_mem_region(kvmppc_uvmem_pgmap.res.start,
                           resource_size(&kvmppc_uvmem_pgmap.res));
......
@@ -740,7 +740,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                        pte.raddr &= ~SPLIT_HACK_MASK;
-               /* fall through */
+               fallthrough;
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -1795,7 +1795,7 @@ static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
        vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
 
-       err = kvmppc_mmu_init(vcpu);
+       err = kvmppc_mmu_init_pr(vcpu);
        if (err < 0)
                goto free_shared_page;
@@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
+       kvmppc_mmu_destroy_pr(vcpu);
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
@@ -2086,7 +2087,6 @@ static struct kvmppc_ops kvm_ops_pr = {
        .age_hva = kvm_age_hva_pr,
        .test_age_hva = kvm_test_age_hva_pr,
        .set_spte_hva = kvm_set_spte_hva_pr,
-       .mmu_destroy = kvmppc_mmu_destroy_pr,
        .free_memslot = kvmppc_core_free_memslot_pr,
        .init_vm = kvmppc_core_init_vm_pr,
        .destroy_vm = kvmppc_core_destroy_vm_pr,
......
@@ -421,11 +421,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        case BOOKE_IRQPRIO_DATA_STORAGE:
        case BOOKE_IRQPRIO_ALIGNMENT:
                update_dear = true;
-               /* fall through */
+               fallthrough;
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
-               /* fall through */
+               fallthrough;
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
@@ -459,7 +459,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
-               /* fall through */
+               fallthrough;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
@@ -2073,11 +2073,6 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
        kvmppc_clear_dbsr();
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-       vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
        return kvm->arch.kvm_ops->init_vm(kvm);
......
@@ -94,7 +94,6 @@ enum int_class {
 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
 
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
                                        struct kvm_vcpu *vcpu,
                                        unsigned int inst, int *advance);
@@ -102,7 +101,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
                                           ulong spr_val);
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
                                           ulong *spr_val);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
                                        struct kvm_vcpu *vcpu,
                                        unsigned int inst, int *advance);
......
@@ -490,7 +490,6 @@ static struct kvmppc_ops kvm_ops_e500 = {
        .vcpu_put = kvmppc_core_vcpu_put_e500,
        .vcpu_create = kvmppc_core_vcpu_create_e500,
        .vcpu_free = kvmppc_core_vcpu_free_e500,
-       .mmu_destroy = kvmppc_mmu_destroy_e500,
        .init_vm = kvmppc_core_init_vm_e500,
        .destroy_vm = kvmppc_core_destroy_vm_e500,
        .emulate_op = kvmppc_core_emulate_op_e500,
......
@@ -533,10 +533,6 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
 }
 
-void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
-{
-}
-
 /*****************************************/
 
 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
......
@@ -376,7 +376,6 @@ static struct kvmppc_ops kvm_ops_e500mc = {
        .vcpu_put = kvmppc_core_vcpu_put_e500mc,
        .vcpu_create = kvmppc_core_vcpu_create_e500mc,
        .vcpu_free = kvmppc_core_vcpu_free_e500mc,
-       .mmu_destroy = kvmppc_mmu_destroy_e500,
        .init_vm = kvmppc_core_init_vm_e500mc,
        .destroy_vm = kvmppc_core_destroy_vm_e500mc,
        .emulate_op = kvmppc_core_emulate_op_e500,
......
@@ -524,7 +524,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = 1;
                break;
        case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
-               /* fall through */
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
@@ -669,6 +668,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
                     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
                break;
+#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+       case KVM_CAP_PPC_SECURE_GUEST:
+               r = hv_enabled && kvmppc_hv_ops->enable_svm &&
+                       !kvmppc_hv_ops->enable_svm(NULL);
+               break;
 #endif
        default:
                r = 0;
@@ -751,7 +756,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        return 0;
 
 out_vcpu_uninit:
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
        return err;
 }
@@ -784,7 +788,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
        kvmppc_core_vcpu_free(vcpu);
 
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
@@ -2169,6 +2172,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                        break;
                r = kvm->arch.kvm_ops->enable_nested(kvm);
                break;
+#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+       case KVM_CAP_PPC_SECURE_GUEST:
+               r = -EINVAL;
+               if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
+                       break;
+               r = kvm->arch.kvm_ops->enable_svm(kvm);
+               break;
 #endif
        default:
                r = -EINVAL;
......
@@ -1016,6 +1016,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_INJECT_EXT_DABT 178
 #define KVM_CAP_S390_VCPU_RESETS 179
 #define KVM_CAP_S390_PROTECTED 180
+#define KVM_CAP_PPC_SECURE_GUEST 181
 
 #ifdef KVM_CAP_IRQ_ROUTING
......