Commit 90b41691 authored by Huang Ying, committed by Andrew Morton

mm: add framework for PCP high auto-tuning

The page allocation performance requirements of different workloads usually
differ, so PCP (per-CPU pageset) high needs to be tuned to optimize page
allocation performance for the workload.  There is already a system-wide
sysctl knob (percpu_pagelist_high_fraction) to tune PCP high by hand, but it
is hard to find the best value manually, and one global configuration may not
work best for the different workloads that run on the same system.  One
solution to these issues is to tune the PCP high of each CPU automatically.

This patch adds the framework for PCP high auto-tuning.  With it, pcp->high
of each CPU will be changed automatically by the tuning algorithm at runtime.
The minimal high (pcp->high_min) is the original PCP high value calculated
based on the low watermark pages, while the maximal high (pcp->high_max) is
the PCP high value that results when the percpu_pagelist_high_fraction sysctl
knob is set to MIN_PERCPU_PAGELIST_HIGH_FRACTION, that is, the maximal
pcp->high that can be set via the sysctl knob by hand.
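
To make the two bounds concrete, the following is a small userspace sketch
(not kernel code) of the ratios that zone_highsize() works with in the diff
below.  The zone size, low watermark, and CPU count are made-up example
numbers, and MIN_PERCPU_PAGELIST_HIGH_FRACTION is assumed to be 8 as in
mm/page_alloc.c.

	/* Simplified model of the high_min/high_max derivation. */
	#include <stdio.h>

	#define MIN_PERCPU_PAGELIST_HIGH_FRACTION 8

	static const unsigned long managed_pages = 4UL << 20;	/* 16 GB of 4 KB pages */
	static const unsigned long low_wmark_pages = 16384;	/* hypothetical low watermark */
	static const int nr_local_cpus = 16;			/* CPUs local to the zone */

	static long model_high(int high_fraction)
	{
		unsigned long total_pages;

		if (!high_fraction)
			total_pages = low_wmark_pages;	/* default: bounded by the low watermark */
		else
			total_pages = managed_pages / high_fraction;

		/* split the budget across the CPUs local to the zone */
		return total_pages / nr_local_cpus;
	}

	int main(void)
	{
		printf("high_min per CPU: %ld pages\n", model_high(0));
		printf("high_max per CPU: %ld pages\n",
		       model_high(MIN_PERCPU_PAGELIST_HIGH_FRACTION));
		return 0;
	}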

PCP high auto-tuning may not work well for some workloads.  So, when PCP high
is tuned by hand via the sysctl knob, auto-tuning is disabled and the hand-set
PCP high is used instead.
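
The knob is the existing /proc/sys/vm/percpu_pagelist_high_fraction sysctl.
Purely as an illustration of why equal bounds pin the value (the real tuning
algorithm only arrives in later patches of the series), here is a hypothetical
clamp helper: any tuned value constrained to a degenerate [high_min, high_max]
range collapses back to the hand-set value.

	#include <assert.h>

	/* hypothetical helper, not taken from the kernel */
	static long clamp_high(long tuned, long high_min, long high_max)
	{
		if (tuned < high_min)
			return high_min;
		if (tuned > high_max)
			return high_max;
		return tuned;
	}

	int main(void)
	{
		long manual = 1000;	/* hand-tuned pcp high (example value) */

		/* auto-tuning enabled: the tuned value may move within [min, max] */
		assert(clamp_high(1500, 500, 2000) == 1500);

		/* manual tuning: high_min == high_max, so tuning has no effect */
		assert(clamp_high(1500, manual, manual) == manual);
		assert(clamp_high(200, manual, manual) == manual);
		return 0;
	}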

This patch only adds the framework, so pcp->high is always set to
pcp->high_min (the original default).  The actual auto-tuning algorithm is
added in the following patches of the series.

Link: https://lkml.kernel.org/r/20231016053002.756205-7-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c0a24239
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -693,6 +693,8 @@ struct per_cpu_pages {
 	spinlock_t lock;	/* Protects lists field */
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
+	int high_min;		/* min high watermark */
+	int high_max;		/* max high watermark */
 	int batch;		/* chunk size for buddy add/remove */
 	u8 flags;		/* protected by pcp->lock */
 	u8 alloc_factor;	/* batch scaling factor during allocate */
@@ -852,7 +854,8 @@ struct zone {
 	 * the high and batch values are copied to individual pagesets for
 	 * faster access
 	 */
-	int pageset_high;
+	int pageset_high_min;
+	int pageset_high_max;
 	int pageset_batch;
 #ifndef CONFIG_SPARSEMEM
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2350,7 +2350,7 @@ static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
 		       bool free_high)
 {
-	int high = READ_ONCE(pcp->high);
+	int high = READ_ONCE(pcp->high_min);
 	if (unlikely(!high || free_high))
 		return 0;
@@ -2689,7 +2689,7 @@ static int nr_pcp_alloc(struct per_cpu_pages *pcp, int order)
 {
 	int high, batch, max_nr_alloc;
-	high = READ_ONCE(pcp->high);
+	high = READ_ONCE(pcp->high_min);
 	batch = READ_ONCE(pcp->batch);
 	/* Check for PCP disabled or boot pageset */
@@ -5296,14 +5296,15 @@ static int zone_batchsize(struct zone *zone)
 }
 static int percpu_pagelist_high_fraction;
-static int zone_highsize(struct zone *zone, int batch, int cpu_online)
+static int zone_highsize(struct zone *zone, int batch, int cpu_online,
+			 int high_fraction)
 {
 #ifdef CONFIG_MMU
 	int high;
 	int nr_split_cpus;
 	unsigned long total_pages;
-	if (!percpu_pagelist_high_fraction) {
+	if (!high_fraction) {
 		/*
 		 * By default, the high value of the pcp is based on the zone
 		 * low watermark so that if they are full then background
@@ -5316,15 +5317,15 @@ static int zone_highsize(struct zone *zone, int batch, int cpu_online)
 		 * value is based on a fraction of the managed pages in the
 		 * zone.
 		 */
-		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
+		total_pages = zone_managed_pages(zone) / high_fraction;
 	}
 	/*
 	 * Split the high value across all online CPUs local to the zone. Note
 	 * that early in boot that CPUs may not be online yet and that during
 	 * CPU hotplug that the cpumask is not yet updated when a CPU is being
-	 * onlined. For memory nodes that have no CPUs, split pcp->high across
-	 * all online CPUs to mitigate the risk that reclaim is triggered
+	 * onlined. For memory nodes that have no CPUs, split the high value
+	 * across all online CPUs to mitigate the risk that reclaim is triggered
 	 * prematurely due to pages stored on pcp lists.
 	 */
 	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
@@ -5352,19 +5353,21 @@ static int zone_highsize(struct zone *zone, int batch, int cpu_online)
  * However, guaranteeing these relations at all times would require e.g. write
  * barriers here but also careful usage of read barriers at the read side, and
  * thus be prone to error and bad for performance. Thus the update only prevents
- * store tearing. Any new users of pcp->batch and pcp->high should ensure they
- * can cope with those fields changing asynchronously, and fully trust only the
- * pcp->count field on the local CPU with interrupts disabled.
+ * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
+ * should ensure they can cope with those fields changing asynchronously, and
+ * fully trust only the pcp->count field on the local CPU with interrupts
+ * disabled.
  *
  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  * outside of boot time (or some other assurance that no concurrent updaters
  * exist).
  */
-static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
-		unsigned long batch)
+static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
+		unsigned long high_max, unsigned long batch)
 {
 	WRITE_ONCE(pcp->batch, batch);
-	WRITE_ONCE(pcp->high, high);
+	WRITE_ONCE(pcp->high_min, high_min);
+	WRITE_ONCE(pcp->high_max, high_max);
 }
 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
@@ -5384,20 +5387,21 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
 	 * need to be as careful as pageset_update() as nobody can access the
 	 * pageset yet.
 	 */
-	pcp->high = BOOT_PAGESET_HIGH;
+	pcp->high_min = BOOT_PAGESET_HIGH;
+	pcp->high_max = BOOT_PAGESET_HIGH;
 	pcp->batch = BOOT_PAGESET_BATCH;
 	pcp->free_factor = 0;
 }
-static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
-		unsigned long batch)
+static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
+		unsigned long high_max, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 	int cpu;
 	for_each_possible_cpu(cpu) {
 		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
-		pageset_update(pcp, high, batch);
+		pageset_update(pcp, high_min, high_max, batch);
 	}
 }
@@ -5407,19 +5411,34 @@ static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long h
  */
 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
 {
-	int new_high, new_batch;
+	int new_high_min, new_high_max, new_batch;
 	new_batch = max(1, zone_batchsize(zone));
-	new_high = zone_highsize(zone, new_batch, cpu_online);
+	if (percpu_pagelist_high_fraction) {
+		new_high_min = zone_highsize(zone, new_batch, cpu_online,
+					     percpu_pagelist_high_fraction);
+		/*
+		 * PCP high is tuned manually, disable auto-tuning via
+		 * setting high_min and high_max to the manual value.
+		 */
+		new_high_max = new_high_min;
+	} else {
+		new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
+		new_high_max = zone_highsize(zone, new_batch, cpu_online,
+					     MIN_PERCPU_PAGELIST_HIGH_FRACTION);
+	}
-	if (zone->pageset_high == new_high &&
+	if (zone->pageset_high_min == new_high_min &&
+	    zone->pageset_high_max == new_high_max &&
 	    zone->pageset_batch == new_batch)
 		return;
-	zone->pageset_high = new_high;
+	zone->pageset_high_min = new_high_min;
+	zone->pageset_high_max = new_high_max;
 	zone->pageset_batch = new_batch;
-	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
+	__zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
+					  new_batch);
 }
 void __meminit setup_zone_pageset(struct zone *zone)
@@ -5528,7 +5547,8 @@ __meminit void zone_pcp_init(struct zone *zone)
 	 */
 	zone->per_cpu_pageset = &boot_pageset;
 	zone->per_cpu_zonestats = &boot_zonestats;
-	zone->pageset_high = BOOT_PAGESET_HIGH;
+	zone->pageset_high_min = BOOT_PAGESET_HIGH;
+	zone->pageset_high_max = BOOT_PAGESET_HIGH;
 	zone->pageset_batch = BOOT_PAGESET_BATCH;
 	if (populated_zone(zone))
@@ -6430,13 +6450,14 @@ EXPORT_SYMBOL(free_contig_range);
 void zone_pcp_disable(struct zone *zone)
 {
 	mutex_lock(&pcp_batch_high_lock);
-	__zone_set_pageset_high_and_batch(zone, 0, 1);
+	__zone_set_pageset_high_and_batch(zone, 0, 0, 1);
 	__drain_all_pages(zone, true);
 }
 void zone_pcp_enable(struct zone *zone)
 {
-	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
+	__zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
+					  zone->pageset_high_max, zone->pageset_batch);
 	mutex_unlock(&pcp_batch_high_lock);
 }