Commit 4f9bc69a authored by Kefeng Wang, committed by Andrew Morton

mm: reuse pageblock_start/end_pfn() macro

Move pageblock_start_pfn()/pageblock_end_pfn() into pageblock-flags.h so
they can be used elsewhere, not only in compaction.  Also use ALIGN_DOWN()
instead of round_down() to pair with ALIGN(); the two are equivalent here
because pageblock_nr_pages is a power of two.

Link: https://lkml.kernel.org/r/20220907060844.126891-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0bba9af0
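For reference, the two helpers compute the first pfn of the pageblock containing pfn and the first pfn past that pageblock. A minimal userspace sketch of the semantics (the simplified ALIGN()/ALIGN_DOWN() stand-ins, the order-9 PB_ORDER, and the main() harness are illustrative, not part of the kernel change; both stand-ins are only valid for power-of-two alignments, which is all the kernel macros need here):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's alignment helpers. */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* Assume order-9 pageblocks (512 pages), a common configuration. */
#define PB_ORDER		9
#define pageblock_nr_pages	(1UL << PB_ORDER)

/* The macros as added to pageblock-flags.h by this commit. */
#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

int main(void)
{
	unsigned long pfn = 1000;

	/* pfn 1000 lies in the 512-page block [512, 1024). */
	printf("start=%lu end=%lu\n",
	       pageblock_start_pfn(pfn), pageblock_end_pfn(pfn));
	/* A boundary pfn starts its own block: [512, 1024) again. */
	printf("start=%lu end=%lu\n",
	       pageblock_start_pfn(512UL), pageblock_end_pfn(512UL));
	return 0;
}

Both calls print start=512 end=1024: the end is exclusive, so even a pfn sitting exactly on a block boundary reports the end of its own block, never the start of the next one.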
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -53,6 +53,8 @@ extern unsigned int pageblock_order;
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #define pageblock_nr_pages	(1UL << pageblock_order)
+#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)
 
 /* Forward declaration */
 struct page;
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -52,8 +52,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 
 #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
 #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
-#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
-#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
 
 /*
  * Page order with-respect-to which proactive compaction
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2000,7 +2000,7 @@ static void __init free_unused_memmap(void)
 		 * presume that there are no holes in the memory map inside
 		 * a pageblock
 		 */
-		start = round_down(start, pageblock_nr_pages);
+		start = pageblock_start_pfn(start);
 
 		/*
 		 * If we had a previous bank, and there is a space
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -544,7 +544,7 @@ static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
 #ifdef CONFIG_SPARSEMEM
 	pfn &= (PAGES_PER_SECTION-1);
 #else
-	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
+	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
 #endif /* CONFIG_SPARSEMEM */
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 }
@@ -1857,7 +1857,7 @@ void set_zone_contiguous(struct zone *zone)
 	unsigned long block_start_pfn = zone->zone_start_pfn;
 	unsigned long block_end_pfn;
 
-	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
+	block_end_pfn = pageblock_end_pfn(block_start_pfn);
 	for (; block_start_pfn < zone_end_pfn(zone);
 			block_start_pfn = block_end_pfn,
 			 block_end_pfn += pageblock_nr_pages) {
@@ -2653,8 +2653,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
 		*num_movable = 0;
 
 	pfn = page_to_pfn(page);
-	start_pfn = pfn & ~(pageblock_nr_pages - 1);
-	end_pfn = start_pfn + pageblock_nr_pages - 1;
+	start_pfn = pageblock_start_pfn(pfn);
+	end_pfn = pageblock_end_pfn(pfn) - 1;
 
 	/* Do not cross zone boundaries */
 	if (!zone_spans_pfn(zone, start_pfn))
@@ -6934,9 +6934,8 @@ static void __init init_unavailable_range(unsigned long spfn,
 	u64 pgcnt = 0;
 
 	for (pfn = spfn; pfn < epfn; pfn++) {
-		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
-			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
-				+ pageblock_nr_pages - 1;
+		if (!pfn_valid(pageblock_start_pfn(pfn))) {
+			pfn = pageblock_end_pfn(pfn) - 1;
 			continue;
 		}
 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
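The two page_alloc.c substitutions above are the only ones that are not textual one-for-one: move_freepages_block() previously masked with pfn & ~(pageblock_nr_pages - 1), which equals ALIGN_DOWN() because the block size is a power of two, and init_unavailable_range() previously skipped to ALIGN_DOWN(pfn, pageblock_nr_pages) + pageblock_nr_pages - 1, which is the last pfn of the block and thus equals pageblock_end_pfn(pfn) - 1. A throwaway assertion under the same illustrative definitions as the sketch above:

#include <assert.h>

static void check_equivalence(unsigned long pfn)
{
	/* Old bitmask form from move_freepages_block(). */
	assert((pfn & ~(pageblock_nr_pages - 1)) == pageblock_start_pfn(pfn));
	/* Old skip-to-last-pfn form from init_unavailable_range(). */
	assert(ALIGN_DOWN(pfn, pageblock_nr_pages) + pageblock_nr_pages - 1
	       == pageblock_end_pfn(pfn) - 1);
}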
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -37,8 +37,8 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
 	struct zone *zone = page_zone(page);
 	unsigned long pfn;
 
-	VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
-		  ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
+	VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
+		  pageblock_start_pfn(end_pfn - 1));
 
 	if (is_migrate_cma_page(page)) {
 		/*
@@ -172,7 +172,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	 * to avoid redundant checks.
 	 */
 	check_unmovable_start = max(page_to_pfn(page), start_pfn);
-	check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
+	check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
 				  end_pfn);
 
 	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
@@ -532,7 +532,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long pfn;
 	struct page *page;
 	/* isolation is done at page block granularity */
-	unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
 	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
 	int ret;
 	bool skip_isolation = false;
@@ -579,10 +579,9 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 {
 	unsigned long pfn;
 	struct page *page;
-	unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
 	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
-
 
 	for (pfn = isolate_start;
 	     pfn < isolate_end;
 	     pfn += pageblock_nr_pages) {
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -297,7 +297,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			continue;
 		}
 
-		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+		block_end_pfn = pageblock_end_pfn(pfn);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
 		pageblock_mt = get_pageblock_migratetype(page);
@@ -635,7 +635,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 		}
 
-		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+		block_end_pfn = pageblock_end_pfn(pfn);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
 		for (; pfn < block_end_pfn; pfn++) {