Commit a4cd8b23 authored by Scott Wood, committed by Avi Kivity

KVM: PPC: e500: enable magic page

This is a shared page used for paravirtualization.  It is always present
in the guest kernel's effective address space at the address indicated
by the hypercall that enables it.

The physical address specified by the hypercall is not used, as
e500 does not have real mode.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 9973d54e
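For orientation, a minimal guest-side sketch (not part of this commit) of how a kernel might enable the magic page. KVM_MAGIC_PAGE (-4096) and the kvm_hypercall(in, out, nr) wrapper are assumed guest helpers here, not something this patch adds:

/* Sketch only: enabling the magic page from the guest.  KVM_MAGIC_PAGE and
 * kvm_hypercall() are assumed guest-side helpers, not part of this patch. */
#include <asm/kvm_para.h>

static void example_enable_magic_page(void)
{
	ulong in[8] = { 0 };
	ulong out[8] = { 0 };

	in[0] = KVM_MAGIC_PAGE;	/* effective address once the MMU is on */
	in[1] = KVM_MAGIC_PAGE;	/* real-mode address; unused on e500,
				 * which has no real mode */

	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
}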
@@ -68,9 +68,11 @@ page that contains parts of supervisor visible register state. The guest can
 map this shared page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE.
 
 With this hypercall issued the guest always gets the magic page mapped at the
-desired location in effective and physical address space. For now, we always
-map the page to -4096. This way we can access it using absolute load and store
-functions. The following instruction reads the first field of the magic page:
+desired location. The first parameter indicates the effective address when the
+MMU is enabled. The second parameter indicates the address in real mode, if
+applicable to the target. For now, we always map the page to -4096. This way we
+can access it using absolute load and store functions. The following
+instruction reads the first field of the magic page:
 
 	ld	rX, -4096(0)
...
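Besides the absolute load shown above, the same state can be reached from guest C code through a pointer at the fixed address. A minimal sketch, assuming the page is mapped at KVM_MAGIC_PAGE (-4096) and carries the kvm_vcpu_arch_shared layout referenced elsewhere in this patch (the kvm_magic_page() helper is hypothetical):

/* Sketch only: read a magic-page field from guest C code. */
static inline struct kvm_vcpu_arch_shared *kvm_magic_page(void)
{
	return (struct kvm_vcpu_arch_shared *)KVM_MAGIC_PAGE;
}

static int example_guest_is_supervisor(void)
{
	/* read the shadowed MSR without trapping into the host */
	return !(kvm_magic_page()->msr & MSR_PR);
}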
@@ -109,6 +109,7 @@ extern void kvmppc_booke_exit(void);
 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
 
 /*
  * Cuts out inst bits with ordering according to spec.
...
@@ -472,6 +472,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gpa_t gpaddr;
 		gfn_t gfn;
 
+#ifdef CONFIG_KVM_E500
+		if (!(vcpu->arch.shared->msr & MSR_PR) &&
+		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+			kvmppc_map_magic(vcpu);
+			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+			r = RESUME_GUEST;
+
+			break;
+		}
+#endif
+
 		/* Check the guest TLB. */
 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
 		if (gtlb_index < 0) {
...
@@ -76,7 +76,8 @@ static inline unsigned int tlb0_get_next_victim(
 static inline unsigned int tlb1_max_shadow_size(void)
 {
-	return tlb1_entry_num - tlbcam_index;
+	/* reserve one entry for magic page */
+	return tlb1_entry_num - tlbcam_index - 1;
 }
 
 static inline int tlbe_is_writable(struct tlbe *tlbe)
...
@@ -142,6 +143,25 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 }
 
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+	struct tlbe magic;
+	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+	pfn_t pfn;
+
+	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+	get_page(pfn_to_page(pfn));
+
+	magic.mas1 = MAS1_VALID | MAS1_TS |
+		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+	magic.mas3 = (pfn << PAGE_SHIFT) |
+		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+
+	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+}
+
 void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 {
 }
...
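A note on the MAS encoding built by kvmppc_map_magic() above: a physical address wider than 32 bits is split across MAS3 (low bits plus permission flags) and MAS7 (upper bits). A small illustrative helper, not part of the patch, showing the equivalence with the pfn arithmetic used there:

/* Illustration only: mas7 = paddr >> 32 is the same as
 * pfn >> (32 - PAGE_SHIFT) when paddr = pfn << PAGE_SHIFT. */
static void example_split_rpn(pfn_t pfn, u32 *mas3, u32 *mas7)
{
	u64 paddr = (u64)pfn << PAGE_SHIFT;

	*mas3 = (u32)paddr | MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	*mas7 = (u32)(paddr >> 32);
}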
@@ -73,7 +73,8 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	}
 	case HC_VENDOR_KVM | KVM_HC_FEATURES:
 		r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+		/* XXX Missing magic page on 44x */
 		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
 #endif
...
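Since CONFIG_KVM_E500 now advertises KVM_FEATURE_MAGIC_PAGE, a guest would normally gate the mapping hypercall on that bit. A hypothetical guest-side check, assuming the KVM_HC_FEATURES result has been stored in a features bitmap:

/* Hypothetical: only map the magic page when the host advertises it. */
static void example_init_paravirt(ulong features)
{
	if (features & (1 << KVM_FEATURE_MAGIC_PAGE))
		example_enable_magic_page();	/* sketch shown earlier */
}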