Commit 0a264884 authored by Nicholas Piggin, committed by Linus Torvalds

mm/vmalloc: rename vmap_*_range vmap_pages_*_range

The vmalloc mapper operates on a struct page * array rather than a linear
physical address, re-name it to make this distinction clear.

Link: https://lkml.kernel.org/r/20210317062402.533919-5-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ding Tianhong <dingtianhong@huawei.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0c95cba4
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -189,7 +189,7 @@ void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
 		arch_sync_kernel_mappings(start, end);
 }
 
-static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 		pgtbl_mod_mask *mask)
 {
@@ -217,7 +217,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 	return 0;
 }
 
-static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 		pgtbl_mod_mask *mask)
 {
@@ -229,13 +229,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = pmd_addr_end(addr, end);
-		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
+		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
 			return -ENOMEM;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
+static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 		pgtbl_mod_mask *mask)
 {
@@ -247,13 +247,13 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
+		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
 			return -ENOMEM;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 
-static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
+static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 		pgtbl_mod_mask *mask)
 {
@@ -265,7 +265,7 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = p4d_addr_end(addr, end);
-		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
+		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
 			return -ENOMEM;
 	} while (p4d++, addr = next, addr != end);
 	return 0;
@@ -306,7 +306,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 		next = pgd_addr_end(addr, end);
 		if (pgd_bad(*pgd))
 			mask |= PGTBL_PGD_MODIFIED;
-		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
+		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
 		if (err)
 			return err;
 	} while (pgd++, addr = next, addr != end);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment