Commit 82b212f4 authored by Mel Gorman, committed by Linus Torvalds

Revert "mm: remove __GFP_NO_KSWAPD"

With "mm: vmscan: scale number of pages reclaimed by reclaim/compaction
based on failures" reverted, Zdenek Kabelac reported the following:

  Hmm, so it just took longer to hit the problem and observe
  kswapd0 spinning on my CPU again - it's not as endless as before -
  but it still easily eats minutes - it helps to turn off Firefox
  or TB (memory hungry apps) so kswapd0 stops soon - and restart
  those apps again.  (And I still have like >1GB of cached memory)

  kswapd0         R  running task        0    30      2 0x00000000
  Call Trace:
    preempt_schedule+0x42/0x60
    _raw_spin_unlock+0x55/0x60
    put_super+0x31/0x40
    drop_super+0x22/0x30
    prune_super+0x149/0x1b0
    shrink_slab+0xba/0x510

The sysrq+m indicates the system has no swap so it'll never reclaim
anonymous pages as part of reclaim/compaction.  That is one part of the
problem but not the root cause as file-backed pages could also be
reclaimed.

The likely underlying problem is that kswapd is woken up or kept awake
for each THP allocation request in the page allocator slow path.

If compaction fails for the requesting process then compaction will be
deferred for a time and direct reclaim is avoided.  However, if there
is a storm of THP requests that are simply rejected, it will still be
the case that kswapd is kept awake for a prolonged period of time as
pgdat->kswapd_max_order is updated on each request.  This is noticed by
the main kswapd() loop, which will then not call kswapd_try_to_sleep().
Instead it will loop, shrinking a small number of pages and calling
shrink_slab() on each iteration.
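
To make that loop behaviour concrete, here is a minimal sketch of the
kswapd() main loop described above.  It is not the literal mm/vmscan.c
source (the real loop also tracks classzone_idx and freezer state);
names follow the 3.7-era code:

/*
 * Sketch only, not the literal mm/vmscan.c code.  While wakeups keep
 * raising pgdat->kswapd_max_order, the "order < new_order" branch is
 * taken, kswapd_try_to_sleep() is never reached and kswapd keeps
 * cycling through balance_pgdat()/shrink_slab().
 */
static void kswapd_loop_sketch(pg_data_t *pgdat, int order, int classzone_idx)
{
	for ( ; ; ) {
		int new_order = pgdat->kswapd_max_order;

		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/* A newer, larger request arrived: service it now */
			order = new_order;
		} else {
			/* Only reached when no wakeup raised the order */
			kswapd_try_to_sleep(pgdat, order, classzone_idx);
			order = pgdat->kswapd_max_order;
			pgdat->kswapd_max_order = 0;
		}

		order = balance_pgdat(pgdat, order, &classzone_idx);
	}
}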

The temptation is to supply a patch that checks whether kswapd was woken
for THP and, if so, ignores pgdat->kswapd_max_order, but that would be a
hack not backed up by proper testing.  As 3.7 is very close to release
and this is not a bug we should release with, the safer path is to revert
"mm: remove __GFP_NO_KSWAPD" for now and revisit it with a view to
ironing out the balance_pgdat() logic in general.
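
Purely for illustration, and not part of this commit, the kind of hack
being declined would amount to special-casing THP-order wakeups inside
wakeup_kswapd(), roughly:

	/*
	 * NOT part of this commit: a rough, untested sketch of the declined
	 * hack.  The idea would be to stop THP-sized wakeups from raising
	 * pgdat->kswapd_max_order, so kswapd can still reach
	 * kswapd_try_to_sleep().  The THP check itself is hypothetical.
	 */
	if (pgdat->kswapd_max_order < order) {
		/* hypothetical: ignore huge-page-order wakeups */
		if (order != HPAGE_PMD_ORDER)
			pgdat->kswapd_max_order = order;
	}

Because nobody has tested which wakeups are actually safe to ignore, the
straight revert below is taken instead.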
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Zdenek Kabelac <zkabelac@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Jiri Slaby <jirislaby@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 05f56484
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1077,7 +1077,8 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying.
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1091,7 +1092,8 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+		       __GFP_NORETRY | __GFP_NO_KSWAPD;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -31,6 +31,7 @@ struct vm_area_struct;
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
 #define ___GFP_NOTRACK		0x200000u
+#define ___GFP_NO_KSWAPD	0x400000u
 #define ___GFP_OTHER_NODE	0x800000u
 #define ___GFP_WRITE		0x1000000u
@@ -85,6 +86,7 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
+#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
@@ -114,7 +116,8 @@ struct vm_area_struct;
 			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+			 __GFP_NO_KSWAPD)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -36,6 +36,7 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
+	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2416,8 +2416,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx,
-					zone_idx(preferred_zone));
+	if (!(gfp_mask & __GFP_NO_KSWAPD))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+						zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2494,7 +2495,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * system then fail the allocation instead of entering direct reclaim.
 	 */
 	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
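
With the flag restored, the effect on THP allocations can be read off
GFP_TRANSHUGE directly: the mask again carries __GFP_NO_KSWAPD, so the
slow path above neither wakes kswapd nor falls into direct reclaim once
compaction has been deferred for the request.  A minimal, illustrative
sketch of such an allocation site (the helper name is hypothetical, not
from this commit):

/*
 * Illustrative sketch, not from this commit: a THP-sized allocation.
 * Because GFP_TRANSHUGE now includes __GFP_NO_KSWAPD (see the gfp.h
 * hunk above), __alloc_pages_slowpath() skips wake_all_kswapd() for
 * this request and bails out once compaction has been deferred,
 * instead of keeping kswapd awake.
 */
static struct page *alloc_thp_sketch(void)
{
	struct page *page = alloc_pages(GFP_TRANSHUGE, HPAGE_PMD_ORDER);

	if (!page)
		/* caller falls back to regular 4K pages */
		page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
	return page;
}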