Commit dac56570 authored by Aneesh Kumar K.V, committed by Michael Ellerman

KVM: PPC: Remove page table walk helpers

This patch removes helpers that were used only once in the code.
Limiting the number of page table walk variants helps ensure that we
don't end up with code walking the page table under wrong assumptions.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 5e1d44ae
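
Both removed wrappers were thin layers over find_linux_pte_or_hugepte(). The sketch below shows the pattern the remaining callers now open-code; it is illustrative only, not the committed code, and the function name example_host_pte_size is invented for the example:

	/*
	 * Illustrative sketch (hypothetical helper): the open-coded walk
	 * that replaces both removed wrappers.  find_linux_pte_or_hugepte()
	 * returns the PTE pointer and, via @shift, the huge page shift
	 * (0 for a normal page).
	 */
	static unsigned long example_host_pte_size(pgd_t *pgdir, unsigned long hva)
	{
		pte_t *ptep;
		unsigned int shift;

		ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
		if (!ptep)
			return 0;	/* no mapping */
		return shift ? (1ul << shift) : PAGE_SIZE;
	}

Each caller then applies its own check against the returned mapping size, rather than relying on the size comparison the wrappers used to hide.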
arch/powerpc/include/asm/pgtable.h
@@ -249,27 +249,6 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #endif
 pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 				unsigned *shift);
-static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
-				       unsigned long *pte_sizep)
-{
-	pte_t *ptep;
-	unsigned long ps = *pte_sizep;
-	unsigned int shift;
-
-	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
-	if (!ptep)
-		return NULL;
-	if (shift)
-		*pte_sizep = 1ul << shift;
-	else
-		*pte_sizep = PAGE_SIZE;
-
-	if (ps > *pte_sizep)
-		return NULL;
-
-	return ptep;
-}
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -131,25 +131,6 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	unlock_rmap(rmap);
 }
-static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
-			      int writing, unsigned long *pte_sizep)
-{
-	pte_t *ptep;
-	unsigned long ps = *pte_sizep;
-	unsigned int hugepage_shift;
-
-	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
-	if (!ptep)
-		return __pte(0);
-	if (hugepage_shift)
-		*pte_sizep = 1ul << hugepage_shift;
-	else
-		*pte_sizep = PAGE_SIZE;
-	if (ps > *pte_sizep)
-		return __pte(0);
-	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
-}
-
 static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
 {
 	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
@@ -166,10 +147,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	struct revmap_entry *rev;
 	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
-	unsigned long pte_size;
+	unsigned hpage_shift;
 	unsigned long is_io;
 	unsigned long *rmap;
-	pte_t pte;
+	pte_t *ptep;
 	unsigned int writing;
 	unsigned long mmu_seq;
 	unsigned long rcbits;
@@ -208,22 +189,33 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Translate to host virtual address */
 	hva = __gfn_to_hva_memslot(memslot, gfn);
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+	if (ptep) {
+		pte_t pte;
+		unsigned int host_pte_size;
 
-	/* Look up the Linux PTE for the backing page */
-	pte_size = psize;
-	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
-	if (pte_present(pte) && !pte_protnone(pte)) {
-		if (writing && !pte_write(pte))
-			/* make the actual HPTE be read-only */
-			ptel = hpte_make_readonly(ptel);
-		is_io = hpte_cache_bits(pte_val(pte));
-		pa = pte_pfn(pte) << PAGE_SHIFT;
-		pa |= hva & (pte_size - 1);
-		pa |= gpa & ~PAGE_MASK;
-	}
-
-	if (pte_size < psize)
-		return H_PARAMETER;
+		if (hpage_shift)
+			host_pte_size = 1ul << hpage_shift;
+		else
+			host_pte_size = PAGE_SIZE;
+		/*
+		 * We should always find the guest page size
+		 * to <= host page size, if host is using hugepage
+		 */
+		if (host_pte_size < psize)
+			return H_PARAMETER;
+
+		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
+		if (pte_present(pte) && !pte_protnone(pte)) {
+			if (writing && !pte_write(pte))
+				/* make the actual HPTE be read-only */
+				ptel = hpte_make_readonly(ptel);
+			is_io = hpte_cache_bits(pte_val(pte));
+			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (host_pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
+		}
+	}
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
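
The size check that lookup_linux_pte_and_update() used to perform (returning __pte(0) when the requested size exceeded the host mapping) is now inlined above and fails the hcall with H_PARAMETER instead. Rendered as a hypothetical standalone predicate (the helper name is invented for illustration, not part of the patch):

	/* Hypothetical helper, for illustration only. */
	static bool host_mapping_covers_guest(unsigned int hpage_shift,
					      unsigned long psize)
	{
		unsigned long host_pte_size;

		host_pte_size = hpage_shift ? (1ul << hpage_shift) : PAGE_SIZE;
		/*
		 * e.g. a 16MB host hugepage (shift 24) can back a 64K guest
		 * page, but a 4K host PTE cannot back a 16MB guest page.
		 */
		return host_pte_size >= psize;
	}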
arch/powerpc/kvm/e500_mmu_host.c
@@ -468,7 +468,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	pgdir = vcpu_e500->vcpu.arch.pgdir;
-	ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
 	if (ptep) {
 		pte_t pte = READ_ONCE(*ptep);
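
The e500 path does not consume the mapping size from the walk, so the direct call passes NULL for the shift out-parameter, as this hunk shows. A minimal sketch of the same pattern (illustrative fragment, assuming only what the call site above implies):

	/* Walk without requesting the host mapping size. */
	pte_t *ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
	if (ptep) {
		pte_t pte = READ_ONCE(*ptep);
		/* consume the PTE; no host page size needed on this path */
	}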