Commit 4d2be6f7 authored by Scott Wood, committed by Alexander Graf

kvm/ppc/e500: eliminate tlb_refs

Commit 523f0e54 ("KVM: PPC: E500:
Explicitly mark shadow maps invalid") began using E500_TLB_VALID
for guest TLB1 entries, and skipping invalidations if it's not set.

However, when E500_TLB_VALID was set for such entries, it was set on
a fake local ref, so the invalidations never happened (see the sketch
below).  gtlb_priv is documented as being only for guest TLB0, though
we already violate that with E500_TLB_BITMAP.
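For illustration, a minimal standalone sketch of the failure mode
(simplified, hypothetical types and names; not the kernel code): the
flag gets recorded on a throwaway stack-local ref, while the
invalidation path consults the per-guest-entry ref, which never
became valid.

#include <stdio.h>

#define E500_TLB_VALID	(1 << 0)

struct tlbe_ref {
	unsigned int flags;
};

/* per-guest-entry state that the invalidation path actually checks */
static struct tlbe_ref gtlb_priv_ref;

static void buggy_map(void)
{
	struct tlbe_ref ref = { 0 };	/* fake local ref */

	ref.flags |= E500_TLB_VALID;	/* recorded on the wrong object */
	/* ref is discarded on return; gtlb_priv_ref.flags stays 0 */
}

static void inval_on_host(void)
{
	/* always bails out, so the host TLB entry is never invalidated */
	if (!(gtlb_priv_ref.flags & E500_TLB_VALID))
		return;

	printf("invalidating host TLB entry\n");	/* never reached */
}

int main(void)
{
	buggy_map();
	inval_on_host();
	return 0;
}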

Now that we have MMU notifiers, and thus don't need to actually
retain a reference to the mapped pages, get rid of tlb_refs and
use gtlb_priv for E500_TLB_VALID in TLB1, as sketched below.
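A data-layout sketch of the change, under the stated assumption that
MMU notifiers make it safe to drop a mapping without holding a page
reference (hypothetical geometry values; not the kernel structs): the
per-host-entry tlb_refs arrays disappear, and the existing
per-guest-entry gtlb_priv carries the VALID flag for both TLB0 and
TLB1.

#include <stdlib.h>

#define E500_TLB_VALID	(1 << 0)

struct tlbe_ref {
	unsigned int flags;		/* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref;		/* now used for TLB0 and TLB1 */
};

/* hypothetical guest TLB geometry, for illustration only */
#define GTLB0_ENTRIES	512
#define GTLB1_ENTRIES	16

int main(void)
{
	/* one priv per *guest* entry; no host-TLB-sized tlb_refs[] */
	struct tlbe_priv *gtlb_priv[2];

	gtlb_priv[0] = calloc(GTLB0_ENTRIES, sizeof(struct tlbe_priv));
	gtlb_priv[1] = calloc(GTLB1_ENTRIES, sizeof(struct tlbe_priv));
	if (!gtlb_priv[0] || !gtlb_priv[1])
		return 1;

	/* marking a TLB1 entry mapped now just sets a flag here */
	gtlb_priv[1][3].ref.flags |= E500_TLB_VALID;

	free(gtlb_priv[0]);
	free(gtlb_priv[1]);
	return 0;
}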

Since we can have more than one host TLB entry for a given tlbe_ref,
be careful not to clear existing flags that are relevant to other
host TLB entries when preparing a new host TLB entry.
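This is why the setup path below switches from
"ref->flags = E500_TLB_VALID" to "ref->flags |= E500_TLB_VALID".
A minimal standalone sketch of the difference (simplified flags;
not the kernel code):

#include <assert.h>

#define E500_TLB_VALID	(1 << 0)
#define E500_TLB_BITMAP	(1 << 1)

int main(void)
{
	/* set earlier, on behalf of another host TLB1 entry backed
	 * by the same tlbe_ref */
	unsigned int flags = E500_TLB_BITMAP;

	/* wrong: flags = E500_TLB_VALID; would wipe E500_TLB_BITMAP */
	flags |= E500_TLB_VALID;	/* preserves the sibling flag */

	assert(flags & E500_TLB_BITMAP);
	assert(flags & E500_TLB_VALID);
	return 0;
}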
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 66a5fecd
arch/powerpc/kvm/e500.h
@@ -26,17 +26,20 @@
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID		(1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP		(1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0		(1 << 2)
 
 struct tlbe_ref {
-	pfn_t pfn;
+	pfn_t pfn;		/* valid only for TLB0, except briefly */
 	unsigned int flags;	/* E500_TLB_* */
 };
 
 struct tlbe_priv {
-	struct tlbe_ref ref;	/* TLB0 only -- TLB1 uses tlb_refs */
+	struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 	unsigned int gtlb_nv[E500_TLB_NUM];
 
-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now.  If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
 	unsigned int host_tlb1_nv;
 
 	u32 svr;
arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
 	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
+	if (!(ref->flags & E500_TLB_VALID)) {
+		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+		     "%s: flags %x\n", __func__, ref->flags);
+		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+	}
 
 	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
 		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
+	ref->flags |= E500_TLB_VALID;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
 	if (ref->flags & E500_TLB_VALID) {
+		/* FIXME: don't log bogus pfn for TLB1 */
 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
 		ref->flags = 0;
 	}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	int tlbsel = 0;
+	int tlbsel;
 	int i;
 
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
-	int i;
-
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
+	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+			struct tlbe_ref *ref =
+				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+			kvmppc_e500_ref_release(ref);
+		}
 	}
-
-	clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
+	kvmppc_e500_tlbil_all(vcpu_e500);
+	clear_tlb_privs(vcpu_e500);
 	clear_tlb1_bitmap(vcpu_e500);
 }
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -512,10 +501,10 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
 		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
 
-	vcpu_e500->tlb_refs[1][sesel] = *ref;
 	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
 	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+	WARN_ON(!(ref->flags & E500_TLB_VALID));
 
 	return sesel;
 }
@@ -527,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref ref;
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
 	int sesel;
 	int r;
 
-	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   &ref);
+				   ref);
 	if (r)
 		return r;
@@ -545,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	/* Otherwise map into TLB1 */
-	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
 	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
@@ -566,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	case 0:
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Triggers after clear_tlb_refs or on initial mapping */
+		/* Triggers after clear_tlb_privs or on initial mapping */
 		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		} else {
@@ -666,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
 					   host_tlb_params[1].entries,
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
+		return -EINVAL;
 
 	return 0;
-
-err:
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
-	return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 }