Commit c9118e6c authored by Mike Rapoport, committed by Linus Torvalds

arch, mm: replace for_each_memblock() with for_each_mem_pfn_range()

There are several occurrences of the following pattern:

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		/* do something with start_pfn and end_pfn */
	}

Rather than iterate over all memblock.memory regions and each time query
for their start and end PFNs, use for_each_mem_pfn_range() iterator to get
simpler and clearer code.
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>	[.clang-format]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-12-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6e245ad4
...@@ -299,16 +299,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) ...@@ -299,16 +299,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
*/ */
static void __init free_unused_memmap(void) static void __init free_unused_memmap(void)
{ {
unsigned long start, prev_end = 0; unsigned long start, end, prev_end = 0;
struct memblock_region *reg; int i;
/* /*
* This relies on each bank being in address order. * This relies on each bank being in address order.
* The banks are sorted previously in bootmem_init(). * The banks are sorted previously in bootmem_init().
*/ */
for_each_memblock(memory, reg) { for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
start = memblock_region_memory_base_pfn(reg);
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
/* /*
* Take care not to free memmap entries that don't exist * Take care not to free memmap entries that don't exist
...@@ -336,8 +334,7 @@ static void __init free_unused_memmap(void) ...@@ -336,8 +334,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to * memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES. * MAX_ORDER_NR_PAGES.
*/ */
prev_end = ALIGN(memblock_region_memory_end_pfn(reg), prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
MAX_ORDER_NR_PAGES);
} }
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
......
...@@ -471,12 +471,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) ...@@ -471,12 +471,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
*/ */
static void __init free_unused_memmap(void) static void __init free_unused_memmap(void)
{ {
unsigned long start, prev_end = 0; unsigned long start, end, prev_end = 0;
struct memblock_region *reg; int i;
for_each_memblock(memory, reg) {
start = __phys_to_pfn(reg->base);
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
/* /*
* Take care not to free memmap entries that don't exist due * Take care not to free memmap entries that don't exist due
...@@ -496,8 +494,7 @@ static void __init free_unused_memmap(void) ...@@ -496,8 +494,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to * memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES. * MAX_ORDER_NR_PAGES.
*/ */
prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
MAX_ORDER_NR_PAGES);
} }
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
......
...@@ -1242,14 +1242,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, ...@@ -1242,14 +1242,15 @@ static void fadump_free_reserved_memory(unsigned long start_pfn,
*/ */
static void fadump_release_reserved_area(u64 start, u64 end) static void fadump_release_reserved_area(u64 start, u64 end)
{ {
u64 tstart, tend, spfn, epfn; u64 tstart, tend, spfn, epfn, reg_spfn, reg_epfn, i;
struct memblock_region *reg;
spfn = PHYS_PFN(start); spfn = PHYS_PFN(start);
epfn = PHYS_PFN(end); epfn = PHYS_PFN(end);
for_each_memblock(memory, reg) {
tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); tstart = max_t(u64, spfn, reg_spfn);
tend = min_t(u64, epfn, reg_epfn);
if (tstart < tend) { if (tstart < tend) {
fadump_free_reserved_memory(tstart, tend); fadump_free_reserved_memory(tstart, tend);
......
...@@ -184,15 +184,16 @@ void __init initmem_init(void) ...@@ -184,15 +184,16 @@ void __init initmem_init(void)
/* mark pages that don't exist as nosave */ /* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void) static int __init mark_nonram_nosave(void)
{ {
struct memblock_region *reg, *prev = NULL; unsigned long spfn, epfn, prev = 0;
int i;
for_each_memblock(memory, reg) { for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
if (prev && if (prev && prev < spfn)
memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) register_nosave_region(prev, spfn);
register_nosave_region(memblock_region_memory_end_pfn(prev),
memblock_region_memory_base_pfn(reg)); prev = epfn;
prev = reg;
} }
return 0; return 0;
} }
#else /* CONFIG_NEED_MULTIPLE_NODES */ #else /* CONFIG_NEED_MULTIPLE_NODES */
......
...@@ -804,17 +804,14 @@ static void __init setup_nonnuma(void) ...@@ -804,17 +804,14 @@ static void __init setup_nonnuma(void)
unsigned long total_ram = memblock_phys_mem_size(); unsigned long total_ram = memblock_phys_mem_size();
unsigned long start_pfn, end_pfn; unsigned long start_pfn, end_pfn;
unsigned int nid = 0; unsigned int nid = 0;
struct memblock_region *reg; int i;
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram); top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n", printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20); (top_of_ram - total_ram) >> 20);
for_each_memblock(memory, reg) { for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
fake_numa_create_new_node(end_pfn, &nid); fake_numa_create_new_node(end_pfn, &nid);
memblock_set_node(PFN_PHYS(start_pfn), memblock_set_node(PFN_PHYS(start_pfn),
PFN_PHYS(end_pfn - start_pfn), PFN_PHYS(end_pfn - start_pfn),
......
...@@ -183,9 +183,9 @@ static void mark_kernel_pgd(void) ...@@ -183,9 +183,9 @@ static void mark_kernel_pgd(void)
void __init cmma_init_nodat(void) void __init cmma_init_nodat(void)
{ {
struct memblock_region *reg;
struct page *page; struct page *page;
unsigned long start, end, ix; unsigned long start, end, ix;
int i;
if (cmma_flag < 2) if (cmma_flag < 2)
return; return;
...@@ -193,9 +193,7 @@ void __init cmma_init_nodat(void) ...@@ -193,9 +193,7 @@ void __init cmma_init_nodat(void)
mark_kernel_pgd(); mark_kernel_pgd();
/* Set all kernel pages not used for page tables to stable/no-dat */ /* Set all kernel pages not used for page tables to stable/no-dat */
for_each_memblock(memory, reg) { for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
start = memblock_region_memory_base_pfn(reg);
end = memblock_region_memory_end_pfn(reg);
page = pfn_to_page(start); page = pfn_to_page(start);
for (ix = start; ix < end; ix++, page++) { for (ix = start; ix < end; ix++, page++) {
if (__test_and_clear_bit(PG_arch_1, &page->flags)) if (__test_and_clear_bit(PG_arch_1, &page->flags))
......
...@@ -226,15 +226,12 @@ void __init allocate_pgdat(unsigned int nid) ...@@ -226,15 +226,12 @@ void __init allocate_pgdat(unsigned int nid)
static void __init do_init_bootmem(void) static void __init do_init_bootmem(void)
{ {
struct memblock_region *reg; unsigned long start_pfn, end_pfn;
int i;
/* Add active regions with valid PFNs. */ /* Add active regions with valid PFNs. */
for_each_memblock(memory, reg) { for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
unsigned long start_pfn, end_pfn;
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
__add_active_range(0, start_pfn, end_pfn); __add_active_range(0, start_pfn, end_pfn);
}
/* All of system RAM sits in node 0 for the non-NUMA case */ /* All of system RAM sits in node 0 for the non-NUMA case */
allocate_pgdat(0); allocate_pgdat(0);
......
...@@ -1663,12 +1663,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void) ...@@ -1663,12 +1663,10 @@ phys_addr_t __init_memblock memblock_reserved_size(void)
phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{ {
unsigned long pages = 0; unsigned long pages = 0;
struct memblock_region *r;
unsigned long start_pfn, end_pfn; unsigned long start_pfn, end_pfn;
int i;
for_each_memblock(memory, r) { for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
start_pfn = memblock_region_memory_base_pfn(r);
end_pfn = memblock_region_memory_end_pfn(r);
start_pfn = min_t(unsigned long, start_pfn, limit_pfn); start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
end_pfn = min_t(unsigned long, end_pfn, limit_pfn); end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
pages += end_pfn - start_pfn; pages += end_pfn - start_pfn;
......
...@@ -291,13 +291,11 @@ static void __init memory_present(int nid, unsigned long start, unsigned long en ...@@ -291,13 +291,11 @@ static void __init memory_present(int nid, unsigned long start, unsigned long en
*/ */
static void __init memblocks_present(void) static void __init memblocks_present(void)
{ {
struct memblock_region *reg; unsigned long start, end;
int i, nid;
for_each_memblock(memory, reg) { for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
memory_present(memblock_get_region_node(reg), memory_present(nid, start, end);
memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
}
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment