Commit 9f7d5bb5 authored by Xiantao Zhang's avatar Xiantao Zhang Committed by Avi Kivity

KVM: ia64: Add handler for crashed vmm

The vmm runs in an isolated address space and is just a copy
of the host's kvm-intel module, so once the vmm crashes, we crash only the
guests running on it instead of crashing the whole kernel.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 5e2be198
...@@ -942,8 +942,20 @@ static void vcpu_do_resume(struct kvm_vcpu *vcpu) ...@@ -942,8 +942,20 @@ static void vcpu_do_resume(struct kvm_vcpu *vcpu)
ia64_set_pta(vcpu->arch.vhpt.pta.val); ia64_set_pta(vcpu->arch.vhpt.pta.val);
} }
/*
 * Verify that the VMM this vcpu is about to resume on is still healthy.
 *
 * vmm_sanity is cleared by vmm_panic_handler() when the VMM takes an
 * unexpected interruption; in that case we kill just this guest via
 * panic_vm() rather than bringing down the whole host kernel.
 * EXIT_REASON_DEBUG exits are exempt — presumably debug exits are
 * expected even from a flagged VMM (TODO: confirm against exit path).
 */
static void vmm_sanity_check(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
		/*
		 * Fixed message: the original literals concatenated to
		 * "...check,it maybe caused..." (missing space, "maybe"
		 * for "may be").
		 */
		panic_vm(vcpu, "Failed to do vmm sanity check, "
			"it may be caused by crashed vmm!!\n\n");
	}
}
static void kvm_do_resume_op(struct kvm_vcpu *vcpu) static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{ {
vmm_sanity_check(vcpu); /*Guarantee vcpu runing on healthy vmm!*/
if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
vcpu_do_resume(vcpu); vcpu_do_resume(vcpu);
return; return;
...@@ -969,3 +981,11 @@ void vmm_transition(struct kvm_vcpu *vcpu) ...@@ -969,3 +981,11 @@ void vmm_transition(struct kvm_vcpu *vcpu)
1, 0, 0, 0, 0, 0); 1, 0, 0, 0, 0, 0);
kvm_do_resume_op(vcpu); kvm_do_resume_op(vcpu);
} }
/*
 * Entry point reached (from kvm_vmm_panic in the IVT) when the VMM takes
 * an unexpected interruption.  Marks the VMM as insane and panics only
 * the current guest, leaving the host kernel running.
 */
void vmm_panic_handler(u64 vec)
{
	/* vcpu that was executing when the VMM trapped. */
	struct kvm_vcpu *v = current_vcpu;

	/* Flag the VMM as unhealthy so vmm_sanity_check() fails from now on. */
	vmm_sanity = 0;

	panic_vm(v, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
		 vec2off[vec]);
}
...@@ -741,5 +741,8 @@ void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); ...@@ -741,5 +741,8 @@ void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
u64 arg4, u64 arg5, u64 arg6, u64 arg7); u64 arg4, u64 arg5, u64 arg6, u64 arg7);
extern long vmm_sanity;
#endif #endif
#endif /* __VCPU_H__ */ #endif /* __VCPU_H__ */
...@@ -32,6 +32,8 @@ MODULE_LICENSE("GPL"); ...@@ -32,6 +32,8 @@ MODULE_LICENSE("GPL");
extern char kvm_ia64_ivt; extern char kvm_ia64_ivt;
extern fpswa_interface_t *vmm_fpswa_interface; extern fpswa_interface_t *vmm_fpswa_interface;
long vmm_sanity = 1;
struct kvm_vmm_info vmm_info = { struct kvm_vmm_info vmm_info = {
.module = THIS_MODULE, .module = THIS_MODULE,
.vmm_entry = vmm_entry, .vmm_entry = vmm_entry,
......
...@@ -70,14 +70,12 @@ ...@@ -70,14 +70,12 @@
# define PSR_DEFAULT_BITS 0 # define PSR_DEFAULT_BITS 0
#endif #endif
#define KVM_FAULT(n) \ #define KVM_FAULT(n) \
kvm_fault_##n:; \ kvm_fault_##n:; \
mov r19=n;; \ mov r19=n;; \
br.sptk.many kvm_fault_##n; \ br.sptk.many kvm_vmm_panic; \
;; \ ;; \
#define KVM_REFLECT(n) \ #define KVM_REFLECT(n) \
mov r31=pr; \ mov r31=pr; \
mov r19=n; /* prepare to save predicates */ \ mov r19=n; /* prepare to save predicates */ \
...@@ -85,17 +83,26 @@ ...@@ -85,17 +83,26 @@
;; \ ;; \
tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
(p7)br.sptk.many kvm_dispatch_reflection; \ (p7)br.sptk.many kvm_dispatch_reflection; \
br.sptk.many kvm_panic; \ br.sptk.many kvm_vmm_panic; \
GLOBAL_ENTRY(kvm_panic) GLOBAL_ENTRY(kvm_vmm_panic)
br.sptk.many kvm_panic KVM_SAVE_MIN_WITH_COVER_R19
alloc r14=ar.pfs,0,0,1,0
mov out0=r15
adds r3=8,r2 // set up second base pointer
;; ;;
END(kvm_panic) ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
//(p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
mov rp=r14
;;
br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)
.section .text.ivt,"ax" .section .text.ivt,"ax"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment