Commit 9d98b3ff authored by Alexander Graf

KVM: PPC: e500: Call kvmppc_mmu_map for initial mapping

When emulating tlbwe, we want to automatically map the entry that just got
written in our shadow TLB map, because chances are quite high that it's
going to be used very soon.

Today this happens explicitly, duplicating all the logic that is in
kvmppc_mmu_map() already. Just call that one instead.
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 2c378fd7
...@@ -878,8 +878,8 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea) ...@@ -878,8 +878,8 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{ {
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; struct kvm_book3e_206_tlb_entry *gtlbe;
int tlbsel, esel, stlbsel, sesel; int tlbsel, esel;
int recal = 0; int recal = 0;
tlbsel = get_tlb_tlbsel(vcpu); tlbsel = get_tlb_tlbsel(vcpu);
...@@ -917,40 +917,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) ...@@ -917,40 +917,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) { if (tlbe_is_host_safe(vcpu, gtlbe)) {
u64 eaddr; u64 eaddr = get_tlb_eaddr(gtlbe);
u64 raddr; u64 raddr = get_tlb_raddr(gtlbe);
switch (tlbsel) { if (tlbsel == 0) {
case 0:
/* TLB0 */
gtlbe->mas1 &= ~MAS1_TSIZE(~0); gtlbe->mas1 &= ~MAS1_TSIZE(~0);
gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
stlbsel = 0;
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
sesel = 0; /* unused */
break;
case 1:
/* TLB1 */
eaddr = get_tlb_eaddr(gtlbe);
raddr = get_tlb_raddr(gtlbe);
/* Create a 4KB mapping on the host.
* If the guest wanted a large page,
* only the first 4KB is mapped here and the rest
* are mapped on the fly. */
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
break;
default:
BUG();
} }
write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); /* Premap the faulting page */
kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
} }
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment