Commit 690ed4ca authored by Paul Mackerras, committed by Michael Ellerman

KVM: PPC: Book3S HV: Use hypercalls for TLB invalidation when nested

This adds code to call the H_TLB_INVALIDATE hypercall when running as
a guest, in the cases where we need to invalidate TLBs (or other MMU
caches) as part of managing the mappings for a nested guest.  Calling
H_TLB_INVALIDATE lets the nested hypervisor inform the parent
hypervisor about changes to partition-scoped page tables or the
partition table without needing to do hypervisor-privileged tlbie
instructions.
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent e3b6b466
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h> #include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h> #include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
#ifdef CONFIG_PPC_PSERIES #ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void) static inline bool kvmhv_on_pseries(void)
...@@ -117,6 +118,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, ...@@ -117,6 +118,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
bool create); bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp); void kvmhv_put_nested(struct kvm_nested_guest *gp);
/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
___PPC_R(r))
/* Power architecture requires HPT is at least 256kiB, at most 64TiB */ /* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER 18 #define PPC_MIN_HPT_ORDER 18
#define PPC_MAX_HPT_ORDER 46 #define PPC_MAX_HPT_ORDER 46
......
...@@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, ...@@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
unsigned int pshift, unsigned int lpid) unsigned int pshift, unsigned int lpid)
{ {
unsigned long psize = PAGE_SIZE; unsigned long psize = PAGE_SIZE;
int psi;
long rc;
unsigned long rb;
if (pshift) if (pshift)
psize = 1UL << pshift; psize = 1UL << pshift;
else
pshift = PAGE_SHIFT;
addr &= ~(psize - 1); addr &= ~(psize - 1);
if (!kvmhv_on_pseries()) {
radix__flush_tlb_lpid_page(lpid, addr, psize); radix__flush_tlb_lpid_page(lpid, addr, psize);
return;
}
psi = shift_to_mmu_psize(pshift);
rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
lpid, rb);
if (rc)
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
} }
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{ {
long rc;
if (!kvmhv_on_pseries()) {
radix__flush_pwc_lpid(lpid); radix__flush_pwc_lpid(lpid);
return;
}
rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
lpid, TLBIEL_INVAL_SET_LPID);
if (rc)
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
} }
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
......
...@@ -299,14 +299,32 @@ void kvmhv_nested_exit(void) ...@@ -299,14 +299,32 @@ void kvmhv_nested_exit(void)
} }
} }
/*
 * Invalidate all TLB entries (and other MMU caches) for an LPID:
 * directly on bare metal, or via the H_TLB_INVALIDATE hypercall when
 * running as a guest on pseries (H_TLBIE_P1_ENC with ric=2 selects the
 * flush-all variant).
 */
static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
/*
 * Set a partition-table entry for an LPID.  On bare metal the entry is
 * written through mmu_partition_table_set_entry().  When running as a
 * guest on pseries, the entry is written into the pseries partition
 * table (big-endian) and the parent hypervisor is told about the change
 * via kvmhv_flush_lpid().
 */
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
...@@ -493,7 +511,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp) ...@@ -493,7 +511,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
radix__flush_tlb_lpid(gp->shadow_lpid); kvmhv_flush_lpid(gp->shadow_lpid);
kvmhv_update_ptbl_cache(gp); kvmhv_update_ptbl_cache(gp);
if (gp->l1_gr_to_hr == 0) if (gp->l1_gr_to_hr == 0)
kvmhv_remove_nested(gp); kvmhv_remove_nested(gp);
...@@ -777,7 +795,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu, ...@@ -777,7 +795,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
gp->shadow_lpid); gp->shadow_lpid);
radix__flush_tlb_lpid(gp->shadow_lpid); kvmhv_flush_lpid(gp->shadow_lpid);
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
break; break;
case 1: case 1:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment