Commit 3eeafd7d authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Ensure split mode works

On PowerPC we can go into MMU Split Mode. That means that either
data relocation is on but instruction relocation is off or vice
versa.

That mode didn't work properly, as we weren't always flushing
entries when going into a new split mode, potentially mapping
different code or data than we're supposed to.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 8a5416db
...@@ -99,10 +99,11 @@ struct kvmppc_vcpu_book3s { ...@@ -99,10 +99,11 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1 #define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2 #define CONTEXT_GUEST_END 2
#define VSID_REAL 0xfffffffffff00000 #define VSID_REAL_DR 0x7ffffffffff00000
#define VSID_REAL_DR 0xffffffffffe00000 #define VSID_REAL_IR 0x7fffffffffe00000
#define VSID_REAL_IR 0xffffffffffd00000 #define VSID_SPLIT_MASK 0x7fffffffffe00000
#define VSID_BAT 0xffffffffffc00000 #define VSID_REAL 0x7fffffffffc00000
#define VSID_BAT 0x7fffffffffb00000
#define VSID_PR 0x8000000000000000 #define VSID_PR 0x8000000000000000
extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask); extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
......
...@@ -134,6 +134,14 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) ...@@ -134,6 +134,14 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) || if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
(vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) { (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
/* Flush split mode PTEs */
if (dr != ir)
kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
VSID_SPLIT_MASK);
kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_flush_segments(vcpu);
kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
} }
...@@ -396,15 +404,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, ...@@ -396,15 +404,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
} else { } else {
pte->eaddr = eaddr; pte->eaddr = eaddr;
pte->raddr = eaddr & 0xffffffff; pte->raddr = eaddr & 0xffffffff;
pte->vpage = eaddr >> 12; pte->vpage = VSID_REAL | eaddr >> 12;
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
pte->vpage |= VSID_REAL;
case MSR_DR:
pte->vpage |= VSID_REAL_DR;
case MSR_IR:
pte->vpage |= VSID_REAL_IR;
}
pte->may_read = true; pte->may_read = true;
pte->may_write = true; pte->may_write = true;
pte->may_execute = true; pte->may_execute = true;
...@@ -513,12 +513,10 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -513,12 +513,10 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
int page_found = 0; int page_found = 0;
struct kvmppc_pte pte; struct kvmppc_pte pte;
bool is_mmio = false; bool is_mmio = false;
bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) { relocated = data ? dr : ir;
relocated = (vcpu->arch.msr & MSR_DR);
} else {
relocated = (vcpu->arch.msr & MSR_IR);
}
/* Resolve real address if translation turned on */ /* Resolve real address if translation turned on */
if (relocated) { if (relocated) {
...@@ -530,14 +528,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -530,14 +528,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.raddr = eaddr & 0xffffffff; pte.raddr = eaddr & 0xffffffff;
pte.eaddr = eaddr; pte.eaddr = eaddr;
pte.vpage = eaddr >> 12; pte.vpage = eaddr >> 12;
}
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0: case 0:
pte.vpage |= VSID_REAL; pte.vpage |= VSID_REAL;
break;
case MSR_DR: case MSR_DR:
pte.vpage |= VSID_REAL_DR; pte.vpage |= VSID_REAL_DR;
break;
case MSR_IR: case MSR_IR:
pte.vpage |= VSID_REAL_IR; pte.vpage |= VSID_REAL_IR;
} break;
} }
if (vcpu->arch.mmu.is_dcbz32(vcpu) && if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment