Commit 43c95bcc authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds

mm/page_alloc: reduce duration that IRQs are disabled for VM counters

IRQs are left disabled for the zone and node VM event counters.  This is
unnecessary as the affected counters are allowed to race for preemption
and IRQs.

This patch reduces the scope of IRQs being disabled via
local_[lock|unlock]_irq on !PREEMPT_RT kernels.  One
__mod_zone_freepage_state is still called with IRQs disabled.  While this
could be moved out, it's not free on all architectures as some require
IRQs to be disabled for mod_zone_page_state on !PREEMPT_RT kernels.

Link: https://lkml.kernel.org/r/20210512095458.30632-7-mgorman@techsingularity.net
Signed-off-by: default avatarMel Gorman <mgorman@techsingularity.net>
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
Acked-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 3e23060b
...@@ -3530,11 +3530,11 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, ...@@ -3530,11 +3530,11 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
pcp = this_cpu_ptr(zone->per_cpu_pageset); pcp = this_cpu_ptr(zone->per_cpu_pageset);
list = &pcp->lists[migratetype]; list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
local_unlock_irqrestore(&pagesets.lock, flags);
if (page) { if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1); __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone, 1); zone_statistics(preferred_zone, zone, 1);
} }
local_unlock_irqrestore(&pagesets.lock, flags);
return page; return page;
} }
...@@ -3586,15 +3586,15 @@ struct page *rmqueue(struct zone *preferred_zone, ...@@ -3586,15 +3586,15 @@ struct page *rmqueue(struct zone *preferred_zone,
if (!page) if (!page)
page = __rmqueue(zone, order, migratetype, alloc_flags); page = __rmqueue(zone, order, migratetype, alloc_flags);
} while (page && check_new_pages(page, order)); } while (page && check_new_pages(page, order));
spin_unlock(&zone->lock);
if (!page) if (!page)
goto failed; goto failed;
__mod_zone_freepage_state(zone, -(1 << order), __mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page)); get_pcppage_migratetype(page));
spin_unlock_irqrestore(&zone->lock, flags);
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, 1); zone_statistics(preferred_zone, zone, 1);
local_irq_restore(flags);
out: out:
/* Separate test+clear to avoid unnecessary atomics */ /* Separate test+clear to avoid unnecessary atomics */
...@@ -3607,7 +3607,7 @@ struct page *rmqueue(struct zone *preferred_zone, ...@@ -3607,7 +3607,7 @@ struct page *rmqueue(struct zone *preferred_zone,
return page; return page;
failed: failed:
local_irq_restore(flags); spin_unlock_irqrestore(&zone->lock, flags);
return NULL; return NULL;
} }
...@@ -5165,11 +5165,11 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, ...@@ -5165,11 +5165,11 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nr_populated++; nr_populated++;
} }
local_unlock_irqrestore(&pagesets.lock, flags);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
local_unlock_irqrestore(&pagesets.lock, flags);
return nr_populated; return nr_populated;
failed_irq: failed_irq:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment