Commit c164fbb4 authored by Logan Gunthorpe, committed by Linus Torvalds

x86/mm: thread pgprot_t through init_memory_mapping()

Thread a pgprot_t argument through init_memory_mapping() and down into
kernel_physical_mapping_init(), in preparation for supporting a pgprot_t
argument to arch_add_memory().

The prototype of init_memory_mapping() also has to move (from
page_types.h to pgtable.h), because its original location came before
the definition of pgprot_t.
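
As a sketch of where the series is headed (illustration only, not part
of this patch): once arch_add_memory() carries page protections in its
params, the x86 implementation can forward them instead of hard-coding
PAGE_KERNEL. The pgprot field below is hypothetical at this point in
the series:

    /*
     * Illustrative only: a future arch_add_memory() forwarding a
     * caller-chosen pgprot. "params->pgprot" does not exist yet;
     * a later patch would add it to struct mhp_params.
     */
    int arch_add_memory(int nid, u64 start, u64 size,
                        struct mhp_params *params)
    {
            unsigned long start_pfn = start >> PAGE_SHIFT;
            unsigned long nr_pages = size >> PAGE_SHIFT;

            /* map the hot-added range with caller-supplied protections */
            init_memory_mapping(start, start + size, params->pgprot);

            return add_pages(nid, start_pfn, nr_pages, params);
    }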
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Eric Badger <ebadger@gigaio.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200306170846.9333-4-logang@deltatee.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f5637d3b
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -71,9 +71,6 @@ static inline phys_addr_t get_max_mapped(void)
 
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-
 extern void initmem_init(void);
 
 #endif	/* !__ASSEMBLY__ */
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1081,6 +1081,9 @@ static inline void __meminit init_trampoline_default(void)
 
 void __init poking_init(void);
 
+unsigned long init_memory_mapping(unsigned long start,
+				  unsigned long end, pgprot_t prot);
+
 # ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
 # else
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -744,7 +744,8 @@ int __init gart_iommu_init(void)
 
 	start_pfn = PFN_DOWN(aper_base);
 	if (!pfn_range_is_mapped(start_pfn, end_pfn))
-		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
+				    PAGE_KERNEL);
 
 	pr_info("PCI-DMA: using GART IOMMU.\n");
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -467,7 +467,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
  * the physical memory. To access them they are temporarily mapped.
  */
 unsigned long __ref init_memory_mapping(unsigned long start,
-					unsigned long end)
+					unsigned long end, pgprot_t prot)
 {
 	struct map_range mr[NR_RANGE_MR];
 	unsigned long ret = 0;
@@ -481,7 +481,8 @@ unsigned long __ref init_memory_mapping(unsigned long start,
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-						   mr[i].page_size_mask);
+						   mr[i].page_size_mask,
+						   prot);
 
 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 
@@ -521,7 +522,7 @@ static unsigned long __init init_range_memory_mapping(
 		 */
 		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
 				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
-		init_memory_mapping(start, end);
+		init_memory_mapping(start, end, PAGE_KERNEL);
 		mapped_ram_size += end - start;
 		can_use_brk_pgt = true;
 	}
@@ -661,7 +662,7 @@ void __init init_mem_mapping(void)
 #endif
 
 	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
+	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
 
 	/* Init the trampoline, possibly with KASLR memory offset */
 	init_trampoline();
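
Every existing caller in the hunks above passes PAGE_KERNEL, so this
patch is behavior-neutral. A hypothetical future caller that wants
different cachability for device memory might look like this (sketch
only; pgprot_writecombine() is an existing kernel helper, but the
function itself is made up for illustration):

    /* Sketch: map a device-memory range write-combined, not cached. */
    static void __init example_map_device_range(u64 start, u64 end)
    {
            pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);

            init_memory_mapping(start, end, prot);
    }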
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -257,7 +257,8 @@ static inline int __is_kernel_text(unsigned long addr)
 unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask,
+			     pgprot_t prot)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
 	unsigned long last_map_addr = end;
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -585,7 +585,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
  */
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t _prot, bool init)
 {
 	unsigned long pages = 0, paddr_next;
 	unsigned long paddr_last = paddr_end;
@@ -595,7 +595,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
 		pud_t *pud;
 		pmd_t *pmd;
-		pgprot_t prot = PAGE_KERNEL;
+		pgprot_t prot = _prot;
 
 		vaddr = (unsigned long)__va(paddr);
 		pud = pud_page + pud_index(vaddr);
@@ -644,9 +644,12 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
+
+			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+
 			set_pte_init((pte_t *)pud,
 				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
-					     PAGE_KERNEL_LARGE),
+					     prot),
 				     init);
 			spin_unlock(&init_mm.page_table_lock);
 			paddr_last = paddr_next;
@@ -669,7 +672,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 
 static unsigned long __meminit
 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t prot, bool init)
 {
 	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
 
@@ -679,7 +682,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 
 	if (!pgtable_l5_enabled())
 		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
-				     page_size_mask, init);
+				     page_size_mask, prot, init);
 
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		p4d_t *p4d = p4d_page + p4d_index(vaddr);
@@ -702,13 +705,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 		if (!p4d_none(*p4d)) {
 			pud = pud_offset(p4d, 0);
 			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					page_size_mask, init);
+					page_size_mask, prot, init);
 			continue;
 		}
 
 		pud = alloc_low_page();
 		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		p4d_populate_init(&init_mm, p4d, pud, init);
@@ -722,7 +725,7 @@ static unsigned long __meminit
 __kernel_physical_mapping_init(unsigned long paddr_start,
 			       unsigned long paddr_end,
 			       unsigned long page_size_mask,
-			       bool init)
+			       pgprot_t prot, bool init)
 {
 	bool pgd_changed = false;
 	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
@@ -743,13 +746,13 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
 			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 						   __pa(vaddr_end),
 						   page_size_mask,
-						   init);
+						   prot, init);
 			continue;
 		}
 
 		p4d = alloc_low_page();
 		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		if (pgtable_l5_enabled())
@@ -778,10 +781,10 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
 unsigned long __meminit
 kernel_physical_mapping_init(unsigned long paddr_start,
 			     unsigned long paddr_end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask, pgprot_t prot)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, true);
+					      page_size_mask, prot, true);
 }
 
 /*
@@ -796,7 +799,8 @@ kernel_physical_mapping_change(unsigned long paddr_start,
 			       unsigned long page_size_mask)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, false);
+					      page_size_mask, PAGE_KERNEL,
+					      false);
 }
 
 #ifndef CONFIG_NUMA
@@ -863,7 +867,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	init_memory_mapping(start, start + size);
+	init_memory_mapping(start, start + size, PAGE_KERNEL);
 
 	return add_pages(nid, start_pfn, nr_pages, params);
 }
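
One subtlety in the init_64.c hunks above: the 1GiB path cannot use the
caller's prot unmodified, because huge mappings need the PSE/large-page
bit set in the entry. The patch therefore ORs __PAGE_KERNEL_LARGE into
the caller-supplied value instead of overwriting it with the fixed
PAGE_KERNEL_LARGE, so caller-chosen bits (e.g. cachability) survive:

    /* From the hunk above: compose large-page bits with the caller's prot. */
    pgprot_t prot = _prot;  /* caller-supplied; PAGE_KERNEL for existing callers */

    prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
    set_pte_init((pte_t *)pud,
                 pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT, prot),
                 init);

Note also that kernel_physical_mapping_change() deliberately passes a
fixed PAGE_KERNEL down: it only changes page sizes on ranges that are
already mapped, so there is no caller-supplied protection to honor.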
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -12,7 +12,8 @@ void early_ioremap_page_table_range_init(void);
 
 unsigned long kernel_physical_mapping_init(unsigned long start,
 					   unsigned long end,
-					   unsigned long page_size_mask);
+					   unsigned long page_size_mask,
+					   pgprot_t prot);
 unsigned long kernel_physical_mapping_change(unsigned long start,
 					     unsigned long end,
 					     unsigned long page_size_mask);
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -352,7 +352,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 	if (type == EFI_MEMORY_MAPPED_IO)
 		return ioremap(phys_addr, size);
 
-	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
+					   PAGE_KERNEL);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
 		efi_ioremap(top, size - (top - phys_addr), type, attribute);