Commit e0932b6c authored by Johannes Weiner, committed by Andrew Morton

mm: page_alloc: consolidate free page accounting

Free page accounting currently happens a bit too high up the call stack,
where it has to deal with guard pages, compaction capturing, block
stealing and even page isolation.  This is subtle and fragile, and makes
it difficult to hack on the code.

Now that type violations on the freelists have been fixed, push the
accounting down to where pages enter and leave the freelist.
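
As a rough sketch of where this ends up (assumed shape only; the bulk of the page allocator changes are in the collapsed portion of the diff below), the freelist add/remove helpers call a single accounting helper, account_freepages(), so their callers no longer touch the counters themselves:

/*
 * Illustrative sketch, not the verbatim patch: the wrapper names and
 * signatures are assumptions modelled on the existing page allocator
 * helpers; the real definitions are in the collapsed diff.
 */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype,
				    bool tail)
{
	__add_to_free_list(page, zone, order, migratetype, tail);
	account_freepages(zone, 1 << order, migratetype);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order, int migratetype)
{
	__del_page_from_free_list(page, zone, order, migratetype);
	account_freepages(zone, -(1 << order), migratetype);
}

With that, guard pages, compaction capture, block stealing and page isolation all get correct accounting simply by going through these helpers.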

[hannes@cmpxchg.org: undo unrelated drive-by line wrap]
  Link: https://lkml.kernel.org/r/20240327185736.GA7597@cmpxchg.org
[hannes@cmpxchg.org: remove unused page parameter from account_freepages()]
  Link: https://lkml.kernel.org/r/20240327185831.GB7597@cmpxchg.org
[baolin.wang@linux.alibaba.com: fix free page accounting]
  Link: https://lkml.kernel.org/r/a2a48baca69f103aa431fd201f8a06e3b95e203d.1712648441.git.baolin.wang@linux.alibaba.com
[andriy.shevchenko@linux.intel.com: avoid defining unused function]
  Link: https://lkml.kernel.org/r/20240423161506.2637177-1-andriy.shevchenko@linux.intel.com
Link: https://lkml.kernel.org/r/20240320180429.678181-11-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fd919a85
@@ -3797,24 +3797,22 @@ static inline bool page_is_guard(struct page *page)
 	return PageGuard(page);
 }
 
-bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
-		      int migratetype);
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
 static inline bool set_page_guard(struct zone *zone, struct page *page,
-				  unsigned int order, int migratetype)
+				  unsigned int order)
 {
 	if (!debug_guardpage_enabled())
 		return false;
-	return __set_page_guard(zone, page, order, migratetype);
+	return __set_page_guard(zone, page, order);
 }
 
-void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
-			int migratetype);
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
 static inline void clear_page_guard(struct zone *zone, struct page *page,
-				    unsigned int order, int migratetype)
+				    unsigned int order)
 {
 	if (!debug_guardpage_enabled())
 		return;
-	__clear_page_guard(zone, page, order, migratetype);
+	__clear_page_guard(zone, page, order);
 }
 
 #else	/* CONFIG_DEBUG_PAGEALLOC */
@@ -3824,9 +3822,9 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool debug_guardpage_enabled(void) { return false; }
 static inline bool page_is_guard(struct page *page) { return false; }
 static inline bool set_page_guard(struct zone *zone, struct page *page,
-			unsigned int order, int migratetype) { return false; }
+			unsigned int order) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
-				unsigned int order, int migratetype) {}
+				unsigned int order) {}
 #endif	/* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA
@@ -487,14 +487,6 @@ static inline void node_stat_sub_folio(struct folio *folio,
 	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
 }
 
-static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
-					     int migratetype)
-{
-	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
-	if (is_migrate_cma(migratetype))
-		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
-}
-
 extern const char * const vmstat_text[];
 
 static inline const char *zone_stat_name(enum zone_stat_item item)
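
With __mod_zone_freepage_state() removed, its CMA special case (and the is_migrate_isolate() checks dropped from the guard-page code below) plausibly folds into the account_freepages() helper mentioned in the fixup notes above. A sketch under that assumption; the actual definition is in the collapsed portion of the diff:

/*
 * Assumed shape of account_freepages(), reconstructed from the helpers
 * removed in this patch; not the verbatim definition.
 */
static void account_freepages(struct zone *zone, int nr_pages,
			      int migratetype)
{
	/* Isolated pageblocks are not counted as free */
	if (is_migrate_isolate(migratetype))
		return;

	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);

	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
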
@@ -32,8 +32,7 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 }
 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
 
-bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
-		      int migratetype)
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order)
 {
 	if (order >= debug_guardpage_minorder())
 		return false;
@@ -41,19 +40,12 @@ bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
 	__SetPageGuard(page);
 	INIT_LIST_HEAD(&page->buddy_list);
 	set_page_private(page, order);
-	/* Guard pages are not available for any usage */
-	if (!is_migrate_isolate(migratetype))
-		__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 
 	return true;
 }
 
-void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
-			int migratetype)
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order)
 {
 	__ClearPageGuard(page);
 	set_page_private(page, 0);
-
-	if (!is_migrate_isolate(migratetype))
-		__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
@@ -1039,11 +1039,6 @@ static inline bool is_migrate_highatomic(enum migratetype migratetype)
 	return migratetype == MIGRATE_HIGHATOMIC;
 }
 
-static inline bool is_migrate_highatomic_page(struct page *page)
-{
-	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
-}
-
 void setup_zone_pageset(struct zone *zone);
 
 struct migration_target_control {
This diff is collapsed.