Commit e8508940 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Magic Page Book3s support

We need to override EA as well as PA lookups for the magic page. When the guest
tells us to project it, the magic page overrides any guest mappings.

In order to reflect that, we need to hook into all the MMU layers of KVM to
force map the magic page if necessary.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent beb03f14
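
The same magic-page predicate recurs at every MMU layer touched below: a lookup is overridden only when the magic page is enabled (a zero magic_page_pa/magic_page_ea means "not registered") and the looked-up address falls into the magic page's frame. As a minimal standalone sketch of the physical-address variant of that check (not part of the commit; the SK_* constants and is_magic_gfn are hypothetical stand-ins for the kernel's PAGE_SHIFT, PAGE_MASK and KVM_PAM):

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel's PAGE_SHIFT/PAGE_MASK/KVM_PAM. */
#define SK_PAGE_SHIFT 12
#define SK_PAGE_MASK  (~((1ULL << SK_PAGE_SHIFT) - 1))
#define SK_KVM_PAM    0x0fffffffffffffffULL

/*
 * True when gfn addresses the guest-physical frame that the guest
 * registered as its magic page; mp_pa == 0 means "not registered",
 * which is why every hook in the patch first tests unlikely(mp_pa).
 */
static bool is_magic_gfn(uint64_t gfn, uint64_t mp_pa)
{
	if (!mp_pa)
		return false;
	return ((gfn << SK_PAGE_SHIFT) & SK_KVM_PAM) ==
	       ((mp_pa & SK_PAGE_MASK) & SK_KVM_PAM);
}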
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -130,6 +130,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -419,6 +419,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	}
 }
 
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	/* Magic page override */
+	if (unlikely(mp_pa) &&
+	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
+		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+		pfn_t pfn;
+
+		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
+		get_page(pfn_to_page(pfn));
+		return pfn;
+	}
+
+	return gfn_to_pfn(vcpu->kvm, gfn);
+}
+
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
  * emulate 32 bytes dcbz length.
@@ -554,6 +573,13 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	if (unlikely(mp_pa) &&
+	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+		return 1;
+	}
+
 	return kvm_is_visible_gfn(vcpu->kvm, gfn);
 }
@@ -1257,6 +1283,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
+	unsigned long p;
 
 	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
@@ -1274,8 +1301,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_shadow_vcpu;
 
-	vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (!vcpu->arch.shared)
+	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+	/* the real shared page fills the last 4k of our page */
+	vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+	if (!p)
 		goto uninit_vcpu;
 
 	vcpu->arch.host_retip = kvm_return_point;
@@ -1322,7 +1351,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
-	free_page((unsigned long)vcpu->arch.shared);
+	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu_book3s->shadow_vcpu);
 	vfree(vcpu_book3s);
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -281,8 +281,24 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 				      struct kvmppc_pte *pte, bool data)
 {
 	int r;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+
+	/* Magic page override */
+	if (unlikely(mp_ea) &&
+	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
+		pte->raddr &= KVM_PAM;
+		pte->may_execute = true;
+		pte->may_read = true;
+		pte->may_write = true;
+
+		return 0;
+	}
+
 	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
 	if (r < 0)
 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -147,7 +147,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct hpte_cache *pte;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (kvm_is_error_hva(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -163,6 +163,22 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	bool found = false;
 	bool perm_err = false;
 	int second = 0;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+
+	/* Magic page override */
+	if (unlikely(mp_ea) &&
+	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		gpte->eaddr = eaddr;
+		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
+		gpte->raddr &= KVM_PAM;
+		gpte->may_execute = true;
+		gpte->may_read = true;
+		gpte->may_write = true;
+
+		return 0;
+	}
 
 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
 	if (!slbe)
@@ -445,6 +461,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	ulong ea = esid << SID_SHIFT;
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
@@ -464,7 +481,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		break;
 	case MSR_DR|MSR_IR:
 		if (!slb)
-			return -ENOENT;
+			goto no_slb;
 
 		*vsid = gvsid;
 		break;
@@ -477,6 +494,17 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		*vsid |= VSID_PR;
 
 	return 0;
+
+no_slb:
+	/* Catch magic page case */
+	if (unlikely(mp_ea) &&
+	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		*vsid = VSID_REAL | esid;
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -101,18 +101,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct kvmppc_sid_map *map;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (kvm_is_error_hva(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
 		return -EINVAL;
 	}
 	hpaddr <<= PAGE_SHIFT;
-#if PAGE_SHIFT == 12
-#elif PAGE_SHIFT == 16
-	hpaddr |= orig_pte->raddr & 0xf000;
-#else
-#error Unknown page size
-#endif
+	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
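
Two details of the patch are easy to miss; what follows is a reading of the change, not text from the commit. First, the shared-page placement in kvmppc_core_vcpu_create: vcpu->arch.shared is pointed at the last 4k of the allocated host page, presumably because the magic page sits at the top of the guest address space in this series, so the sub-page bits OR-ed in by kvmppc_mmu_map_page select that same last 4k on hosts with pages larger than 4k; kvmppc_gfn_to_pfn and kvmppc_core_vcpu_free both mask with PAGE_MASK to recover the containing host page. Second, the page-size cleanup in the last hunk: the mask (~0xfffULL & ~PAGE_MASK) keeps exactly the raddr bits between the 4k magic-page granularity and the host page size. With 4k host pages, ~PAGE_MASK == 0xfff, the mask is 0 and nothing is OR-ed in, matching the removed empty "#if PAGE_SHIFT == 12" branch; with 64k host pages, ~PAGE_MASK == 0xffff and the mask evaluates to 0xf000, exactly the removed "#elif PAGE_SHIFT == 16" branch. The replacement therefore handles any host page size without the "#error Unknown page size" fallback.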