Commit 4aa1444f authored by Marc Zyngier's avatar Marc Zyngier Committed by Jiri Slaby

ARM: KVM: introduce kvm_p*d_addr_end

commit a3c8bd31 upstream.

The use of p*d_addr_end with stage-2 translation is slightly dodgy,
as the IPA is 40bits, while all the p*d_addr_end helpers are
taking an unsigned long (arm64 is fine with that as unsigned long
is 64bit).

The fix is to introduce 64bit clean versions of the same helpers,
and use them in the stage-2 page table code.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent 6dc878f7
...@@ -103,6 +103,19 @@ static inline void kvm_set_s2pte_writable(pte_t *pte) ...@@ -103,6 +103,19 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= L_PTE_S2_RDWR; pte_val(*pte) |= L_PTE_S2_RDWR;
} }
/*
 * Open coded p*d_addr_end that can deal with 64bit addresses.
 *
 * The generic p*d_addr_end helpers take an unsigned long, which is
 * only 32bit on ARM, while stage-2 IPAs are 40bit wide (see commit
 * message); these copies perform the same computation on a u64.
 *
 * The "- 1" on both sides of the comparison makes a __boundary that
 * wrapped around to 0 compare as the maximum value, so (end) is
 * returned instead of the wrapped boundary.
 */
#define kvm_pgd_addr_end(addr, end) \
({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
/* NOTE(review): no pud boundary is computed here — presumably the pud
 * level is folded on this configuration, so the range end is returned
 * unchanged; confirm against the pgtable layout. */
#define kvm_pud_addr_end(addr,end) (end)
/* Same scheme as kvm_pgd_addr_end(), for a pmd-sized range. */
#define kvm_pmd_addr_end(addr, end) \
({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
struct kvm; struct kvm;
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
......
...@@ -134,13 +134,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, ...@@ -134,13 +134,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
pgd = pgdp + pgd_index(addr); pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr); pud = pud_offset(pgd, addr);
if (pud_none(*pud)) { if (pud_none(*pud)) {
addr = pud_addr_end(addr, end); addr = kvm_pud_addr_end(addr, end);
continue; continue;
} }
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) { if (pmd_none(*pmd)) {
addr = pmd_addr_end(addr, end); addr = kvm_pmd_addr_end(addr, end);
continue; continue;
} }
...@@ -151,10 +151,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, ...@@ -151,10 +151,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
/* If we emptied the pte, walk back up the ladder */ /* If we emptied the pte, walk back up the ladder */
if (page_empty(pte)) { if (page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr); clear_pmd_entry(kvm, pmd, addr);
next = pmd_addr_end(addr, end); next = kvm_pmd_addr_end(addr, end);
if (page_empty(pmd) && !page_empty(pud)) { if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr); clear_pud_entry(kvm, pud, addr);
next = pud_addr_end(addr, end); next = kvm_pud_addr_end(addr, end);
} }
} }
......
...@@ -115,6 +115,10 @@ static inline void kvm_set_s2pte_writable(pte_t *pte) ...@@ -115,6 +115,10 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
pte_val(*pte) |= PTE_S2_RDWR; pte_val(*pte) |= PTE_S2_RDWR;
} }
/*
 * On arm64 unsigned long is 64bit, so the generic p*d_addr_end
 * helpers already cope with stage-2 (40bit IPA) addresses (see
 * commit message); simply alias them.
 */
#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
struct kvm; struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment