Commit 6060df84 authored by Marc Zyngier, committed by Christoffer Dall

ARM: KVM: simplify HYP mapping population

The way we populate HYP mappings is a bit convoluted, to say the least.
Passing a pointer around to keep track of the current PFN is quite
odd, and we end up having two different PTE accessors for no good
reason.

Simplify the whole thing by unifying the two PTE accessors, passing
a pgprot_t around, and moving the various validity checks to the
upper layers.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
parent 372b7c1b
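
The heart of the change is the unified PTE accessor: instead of one helper that builds PTEs from a struct page (kernel memory) and another that walks a caller-held PFN pointer (I/O memory), a single loop takes a starting PFN plus a pgprot_t and lets the caller choose the protection. A minimal userspace sketch of that pattern follows; the types, pfn_pte() stand-in, and attribute bits are illustrative only, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-ins for the kernel types: a PTE is "pfn plus attribute bits",
 * and pgprot_t is an opaque bag of protection bits. */
typedef struct { uint64_t val; } pte_t;
typedef struct { uint64_t bits; } pgprot_t;

static pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = { ((uint64_t)pfn << PAGE_SHIFT) | prot.bits };
	return pte;
}

/* One accessor for both mapping flavours: the caller picks the
 * protection (PAGE_HYP vs PAGE_HYP_DEVICE in the real code) and the
 * starting pfn; the loop advances address and pfn together, which is
 * exactly the shape of the unified create_hyp_pte_mappings() below. */
static void map_range(pte_t *table, unsigned long start, unsigned long end,
		      unsigned long pfn, pgprot_t prot)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		table[(addr - start) >> PAGE_SHIFT] = pfn_pte(pfn, prot);
		pfn++;
	}
}

int main(void)
{
	pte_t table[4];
	pgprot_t prot = { 0x3 };	/* made-up attribute bits */
	int i;

	map_range(table, 0x1000, 0x5000, 0x80000, prot);
	for (i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i,
		       (unsigned long long)table[i].val);
	return 0;
}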
@@ -125,54 +125,34 @@ void free_hyp_pmds(void)
 }
 
 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
-				    unsigned long end)
+				    unsigned long end, unsigned long pfn,
+				    pgprot_t prot)
 {
 	pte_t *pte;
 	unsigned long addr;
-	struct page *page;
 
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-
-		pte = pte_offset_kernel(pmd, hyp_addr);
-		BUG_ON(!virt_addr_valid(addr));
-		page = virt_to_page(addr);
-		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
-	}
-}
-
-static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
-				       unsigned long end,
-				       unsigned long *pfn_base)
-{
-	pte_t *pte;
-	unsigned long addr;
-
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-
-		pte = pte_offset_kernel(pmd, hyp_addr);
-		BUG_ON(pfn_valid(*pfn_base));
-		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
-		(*pfn_base)++;
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		pte = pte_offset_kernel(pmd, addr);
+		kvm_set_pte(pte, pfn_pte(pfn, prot));
+		pfn++;
 	}
 }
 
 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
-				   unsigned long end, unsigned long *pfn_base)
+				   unsigned long end, unsigned long pfn,
+				   pgprot_t prot)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, next;
 
 	for (addr = start; addr < end; addr = next) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-		pmd = pmd_offset(pud, hyp_addr);
+		pmd = pmd_offset(pud, addr);
 
 		BUG_ON(pmd_sect(*pmd));
 
 		if (pmd_none(*pmd)) {
-			pte = pte_alloc_one_kernel(NULL, hyp_addr);
+			pte = pte_alloc_one_kernel(NULL, addr);
 			if (!pte) {
 				kvm_err("Cannot allocate Hyp pte\n");
 				return -ENOMEM;
@@ -182,25 +162,17 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 
 		next = pmd_addr_end(addr, end);
 
-		/*
-		 * If pfn_base is NULL, we map kernel pages into HYP with the
-		 * virtual address. Otherwise, this is considered an I/O
-		 * mapping and we map the physical region starting at
-		 * *pfn_base to [start, end[.
-		 */
-		if (!pfn_base)
-			create_hyp_pte_mappings(pmd, addr, next);
-		else
-			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
+		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
+		pfn += (next - addr) >> PAGE_SHIFT;
 	}
 
 	return 0;
 }
 
-static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
+static int __create_hyp_mappings(pgd_t *pgdp,
+				 unsigned long start, unsigned long end,
+				 unsigned long pfn, pgprot_t prot)
 {
-	unsigned long start = (unsigned long)from;
-	unsigned long end = (unsigned long)to;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -209,21 +181,14 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 	if (start >= end)
 		return -EINVAL;
 
-	/* Check for a valid kernel memory mapping */
-	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
-		return -EINVAL;
-
-	/* Check for a valid kernel IO mapping */
-	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
-		return -EINVAL;
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = start; addr < end; addr = next) {
-		unsigned long hyp_addr = KERN_TO_HYP(addr);
-		pgd = hyp_pgd + pgd_index(hyp_addr);
-		pud = pud_offset(pgd, hyp_addr);
+	for (addr = start & PAGE_MASK; addr < end; addr = next) {
+		pgd = pgdp + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
 
 		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, hyp_addr);
+			pmd = pmd_alloc_one(NULL, addr);
 			if (!pmd) {
 				kvm_err("Cannot allocate Hyp pmd\n");
 				err = -ENOMEM;
@@ -233,9 +198,10 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 		}
 
 		next = pgd_addr_end(addr, end);
-		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
+		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
 		if (err)
 			goto out;
+		pfn += (next - addr) >> PAGE_SHIFT;
 	}
 out:
 	mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -255,22 +221,38 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
  */
 int create_hyp_mappings(void *from, void *to)
 {
-	return __create_hyp_mappings(from, to, NULL);
+	unsigned long phys_addr = virt_to_phys(from);
+	unsigned long start = KERN_TO_HYP((unsigned long)from);
+	unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+	/* Check for a valid kernel memory mapping */
+	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
+		return -EINVAL;
+
+	return __create_hyp_mappings(hyp_pgd, start, end,
+				     __phys_to_pfn(phys_addr), PAGE_HYP);
 }
 
 /**
  * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
  * @from:	The kernel start VA of the range
  * @to:		The kernel end VA of the range (exclusive)
- * @addr:	The physical start address which gets mapped
+ * @phys_addr:	The physical start address which gets mapped
  *
  * The resulting HYP VA is the same as the kernel VA, modulo
  * HYP_PAGE_OFFSET.
  */
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long pfn = __phys_to_pfn(addr);
-	return __create_hyp_mappings(from, to, &pfn);
+	unsigned long start = KERN_TO_HYP((unsigned long)from);
+	unsigned long end = KERN_TO_HYP((unsigned long)to);
+
+	/* Check for a valid kernel IO mapping */
+	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
+		return -EINVAL;
+
+	return __create_hyp_mappings(hyp_pgd, start, end,
+				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
 /**
...
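
With the prot choice folded into the two public entry points, the callers stay simple: kernel RAM ranges go through create_hyp_mappings() and get PAGE_HYP, while MMIO ranges go through create_hyp_io_mappings() and get PAGE_HYP_DEVICE. A sketch of what call sites look like; the identifiers are representative of the arch/arm/kvm callers of this era, not copied from this commit:

/* Illustrative only: the real call sites live in arch/arm/kvm/arm.c
 * and the vgic code, and may differ in detail from this sketch. */

/* Kernel RAM (e.g. the Hyp code itself): mapped with PAGE_HYP. */
err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
if (err)
	goto out_err;

/* Device memory (e.g. the GIC virtual CPU interface): a vmalloc'd
 * kernel VA range backed by the physical address vctrl_res.start,
 * mapped with PAGE_HYP_DEVICE. */
err = create_hyp_io_mappings(vctrl_base,
			     vctrl_base + resource_size(&vctrl_res),
			     vctrl_res.start);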