Commit 0ce3d744 authored by Dave Chinner, committed by Al Viro

shrinker: add node awareness

Pass the node of the current zone being reclaimed to shrink_slab(),
allowing the shrinker control nodemask to be set appropriately for node
aware shrinkers.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 4e717f5c
...@@ -692,6 +692,9 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ...@@ -692,6 +692,9 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
.gfp_mask = GFP_KERNEL, .gfp_mask = GFP_KERNEL,
.nr_to_scan = 0, .nr_to_scan = 0,
}; };
nodes_setall(sc.nodes_to_scan);
ret = ashmem_shrink(&ashmem_shrinker, &sc); ret = ashmem_shrink(&ashmem_shrinker, &sc);
sc.nr_to_scan = ret; sc.nr_to_scan = ret;
ashmem_shrink(&ashmem_shrinker, &sc); ashmem_shrink(&ashmem_shrinker, &sc);
......
...@@ -44,6 +44,7 @@ static void drop_slab(void) ...@@ -44,6 +44,7 @@ static void drop_slab(void)
.gfp_mask = GFP_KERNEL, .gfp_mask = GFP_KERNEL,
}; };
nodes_setall(shrink.nodes_to_scan);
do { do {
nr_objects = shrink_slab(&shrink, 1000, 1000); nr_objects = shrink_slab(&shrink, 1000, 1000);
} while (nr_objects > 10); } while (nr_objects > 10);
......
...@@ -16,6 +16,9 @@ struct shrink_control { ...@@ -16,6 +16,9 @@ struct shrink_control {
/* How many slab objects shrinker() should scan and try to reclaim */ /* How many slab objects shrinker() should scan and try to reclaim */
unsigned long nr_to_scan; unsigned long nr_to_scan;
/* shrink from these nodes */
nodemask_t nodes_to_scan;
}; };
#define SHRINK_STOP (~0UL) #define SHRINK_STOP (~0UL)
......
...@@ -248,10 +248,12 @@ void shake_page(struct page *p, int access) ...@@ -248,10 +248,12 @@ void shake_page(struct page *p, int access)
*/ */
if (access) { if (access) {
int nr; int nr;
int nid = page_to_nid(p);
do { do {
struct shrink_control shrink = { struct shrink_control shrink = {
.gfp_mask = GFP_KERNEL, .gfp_mask = GFP_KERNEL,
}; };
node_set(nid, shrink.nodes_to_scan);
nr = shrink_slab(&shrink, 1000, 1000); nr = shrink_slab(&shrink, 1000, 1000);
if (page_count(p) == 1) if (page_count(p) == 1)
......
...@@ -2374,12 +2374,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, ...@@ -2374,12 +2374,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
*/ */
if (global_reclaim(sc)) { if (global_reclaim(sc)) {
unsigned long lru_pages = 0; unsigned long lru_pages = 0;
nodes_clear(shrink->nodes_to_scan);
for_each_zone_zonelist(zone, z, zonelist, for_each_zone_zonelist(zone, z, zonelist,
gfp_zone(sc->gfp_mask)) { gfp_zone(sc->gfp_mask)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue; continue;
lru_pages += zone_reclaimable_pages(zone); lru_pages += zone_reclaimable_pages(zone);
node_set(zone_to_nid(zone),
shrink->nodes_to_scan);
} }
shrink_slab(shrink, sc->nr_scanned, lru_pages); shrink_slab(shrink, sc->nr_scanned, lru_pages);
...@@ -2836,6 +2840,8 @@ static bool kswapd_shrink_zone(struct zone *zone, ...@@ -2836,6 +2840,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
return true; return true;
shrink_zone(zone, sc); shrink_zone(zone, sc);
nodes_clear(shrink.nodes_to_scan);
node_set(zone_to_nid(zone), shrink.nodes_to_scan);
reclaim_state->reclaimed_slab = 0; reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages); nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
...@@ -3544,10 +3550,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) ...@@ -3544,10 +3550,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* number of slab pages and shake the slab until it is reduced * number of slab pages and shake the slab until it is reduced
* by the same nr_pages that we used for reclaiming unmapped * by the same nr_pages that we used for reclaiming unmapped
* pages. * pages.
*
* Note that shrink_slab will free memory on all zones and may
* take a long time.
*/ */
nodes_clear(shrink.nodes_to_scan);
node_set(zone_to_nid(zone), shrink.nodes_to_scan);
for (;;) { for (;;) {
unsigned long lru_pages = zone_reclaimable_pages(zone); unsigned long lru_pages = zone_reclaimable_pages(zone);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment