Commit 96bc451a authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Introduce shared page

For transparent variable sharing between the hypervisor and the guest, I introduce
a shared page. This shared page will contain all the registers the guest can
safely read and write without exiting guest context.

This patch only implements the stubs required for the basic structure of the
shared page; the actual register moving follows in later patches.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 34698d8c
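
For context, a minimal sketch of where this is headed, assuming the follow-up patches in the series: kvm_vcpu_arch_shared starts out empty in this commit and only later gains guest-visible registers, so the msr field below is purely illustrative and not part of this patch. Each backend allocates one zeroed kernel page per vCPU to back the structure, exactly as the hunks below do for 44x, Book3S and e500.

/* Hypothetical future layout -- this patch introduces the struct empty. */
struct kvm_vcpu_arch_shared {
        __u64 msr;      /* illustrative example of a register moved in later */
};

/* Per-vCPU backing store, as added in kvmppc_core_vcpu_create() below: */
vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!vcpu->arch.shared)
        goto uninit_vcpu;       /* unwind the init done so far */
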
arch/powerpc/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <linux/kvm_para.h>
 #include <asm/kvm_asm.h>

 #define KVM_MAX_VCPUS 1
@@ -290,6 +291,7 @@ struct kvm_vcpu_arch {
        struct tasklet_struct tasklet;
        u64 dec_jiffies;
        unsigned long pending_exceptions;
+       struct kvm_vcpu_arch_shared *shared;

 #ifdef CONFIG_PPC_BOOK3S
        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
arch/powerpc/include/asm/kvm_para.h
@@ -20,6 +20,11 @@
 #ifndef __POWERPC_KVM_PARA_H__
 #define __POWERPC_KVM_PARA_H__

+#include <linux/types.h>
+
+struct kvm_vcpu_arch_shared {
+};
+
 #ifdef __KERNEL__

 static inline int kvm_para_available(void)
arch/powerpc/kernel/asm-offsets.c
@@ -400,6 +400,7 @@ int main(void)
        DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
        DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+       DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));

        /* book3s */
 #ifdef CONFIG_PPC_BOOK3S
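
The DEFINE() entries above are how asm-offsets.c turns C struct offsets into constants that assembly code can pick up via the generated asm-offsets.h. A brief sketch of the standard kbuild pattern behind it; the trailing comment shows the kind of assembly consumer that a VCPU_SHARED constant enables in later patches (register numbers are illustrative, not from this commit):

/* Standard kbuild DEFINE(): emits the value into the generated header. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* Later, low-level PPC code can load the shared-page pointer with
 * something like:  ld r5, VCPU_SHARED(r4)   (r4 = struct kvm_vcpu *). */
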
arch/powerpc/kvm/44x.c
@@ -123,8 +123,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_vcpu;

+       vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+       if (!vcpu->arch.shared)
+               goto uninit_vcpu;
+
        return vcpu;

+uninit_vcpu:
+       kvm_vcpu_uninit(vcpu);
 free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 out:
@@ -135,6 +141,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);

+       free_page((unsigned long)vcpu->arch.shared);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
arch/powerpc/kvm/book3s.c
@@ -1242,6 +1242,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_shadow_vcpu;

+       vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+       if (!vcpu->arch.shared)
+               goto uninit_vcpu;
+
        vcpu->arch.host_retip = kvm_return_point;
        vcpu->arch.host_msr = mfmsr();
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -1268,10 +1272,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
-               goto free_shadow_vcpu;
+               goto uninit_vcpu;

        return vcpu;

+uninit_vcpu:
+       kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
        kfree(vcpu_book3s->shadow_vcpu);
 free_vcpu:
@@ -1284,6 +1290,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

+       free_page((unsigned long)vcpu->arch.shared);
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu_book3s->shadow_vcpu);
        vfree(vcpu_book3s);
arch/powerpc/kvm/e500.c
@@ -117,8 +117,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        if (err)
                goto uninit_vcpu;

+       vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+       if (!vcpu->arch.shared)
+               goto uninit_tlb;
+
        return vcpu;

+uninit_tlb:
+       kvmppc_e500_tlb_uninit(vcpu_e500);
 uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
 free_vcpu:
@@ -131,6 +137,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

+       free_page((unsigned long)vcpu->arch.shared);
        kvmppc_e500_tlb_uninit(vcpu_e500);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);