Commit 8f4fc071 authored by Vladimir Davydov, committed by Linus Torvalds

gfp: add __GFP_NOACCOUNT

Not all kmem allocations should be accounted to memcg.  The following
patch gives an example where accounting a certain type of allocation to
memcg can effectively result in a memory leak.  This patch adds the
__GFP_NOACCOUNT flag which, if passed to kmalloc() and friends, forces
the allocation to go through the root cgroup.  It will be used by the
next patch.
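
For illustration, a call site that wants to opt out of memcg accounting
would look roughly like this (a hypothetical caller, not part of this
patch):

	/* Hypothetical call site: with __GFP_NOACCOUNT set, the
	 * allocation is charged to the root cgroup rather than to the
	 * current task's memory cgroup. */
	buf = kmalloc(size, GFP_KERNEL | __GFP_NOACCOUNT);
	if (!buf)
		return -ENOMEM;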

Note that, with kmemleak enabled, each kmalloc() implies yet another
allocation from the kmemleak_object cache, so we add __GFP_NOACCOUNT to
gfp_kmemleak_mask to propagate the flag to those metadata allocations.
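
A worked expansion (illustrative only, not part of the patch) shows the
effect of the mask change:

	/* Before: the flag is masked out, so the kmemleak_object
	 * allocation is still charged to memcg. */
	gfp_kmemleak_mask(GFP_KERNEL | __GFP_NOACCOUNT)
		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN

	/* After: the flag propagates to the metadata allocation. */
	gfp_kmemleak_mask(GFP_KERNEL | __GFP_NOACCOUNT)
		== GFP_KERNEL | __GFP_NOACCOUNT | __GFP_NORETRY |
		   __GFP_NOMEMALLOC | __GFP_NOWARN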

Alternatively, we could have introduced a per-kmem-cache flag disabling
accounting for all allocations from a particular cache, but (a) we would
then have no way to bypass accounting for kmalloc(), and (b) a kmem
cache with this flag set could not be merged with a kmem cache without
it, which would increase the number of global caches, and hence
fragmentation, even when the memory cgroup controller is not used.
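
To make the rejected alternative concrete (SLAB_NOACCOUNT below is
hypothetical; no such flag exists in this kernel):

	/* Hypothetical per-cache flag: every allocation from this cache
	 * would bypass memcg accounting... */
	cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				  SLAB_NOACCOUNT /* hypothetical */, NULL);
	/* ...but (a) kmalloc() draws from the shared kmalloc caches, so
	 * an individual kmalloc() call site could not opt out, and (b)
	 * this cache could never be merged with an otherwise-identical
	 * cache created without the flag. */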

Despite its generic name, __GFP_NOACCOUNT currently disables accounting
only for kmem allocations; user page allocations are always charged.  To
catch abuse of the flag, a warning is issued on any attempt to pass it
to mem_cgroup_try_charge().
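
The hunk adding that warning is not shown in this view; a minimal sketch
of what such a check could look like at the top of
mem_cgroup_try_charge() (an assumption, not the verbatim patch):

	int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
				  gfp_t gfp_mask, struct mem_cgroup **memcgp)
	{
		/* __GFP_NOACCOUNT is meant for kmem allocations only;
		 * warn if a user-page charge path ever sees it. */
		WARN_ON_ONCE(gfp_mask & __GFP_NOACCOUNT);
		/* ... */
	}
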
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: <stable@vger.kernel.org>	[4.0.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4933f55f
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,7 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
+#define ___GFP_NOACCOUNT	0x100000u
 #define ___GFP_NOTRACK		0x200000u
 #define ___GFP_NO_KSWAPD	0x400000u
 #define ___GFP_OTHER_NODE	0x800000u
@@ -87,6 +88,7 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 #define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 	if (!memcg_kmem_enabled())
 		return true;
+	if (gfp & __GFP_NOACCOUNT)
+		return true;
 	/*
 	 * __GFP_NOFAIL allocations will move on even if charging is not
 	 * possible. Therefore we don't even try, and have this allocation
@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (!memcg_kmem_enabled())
 		return cachep;
+	if (gfp & __GFP_NOACCOUNT)
+		return cachep;
 	if (gfp & __GFP_NOFAIL)
 		return cachep;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
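
For context (not part of this diff): the slab hot path consults
memcg_kmem_get_cache() to pick the per-memcg clone of a cache. A
simplified sketch, loosely following the 4.0-era mm/slub.c with details
trimmed:

	static inline struct kmem_cache *
	slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
	{
		flags &= gfp_allowed_mask;
		/* lockdep and failslab checks trimmed */

		/* With the early return added above, an allocation
		 * carrying __GFP_NOACCOUNT keeps the root cache and is
		 * never charged to the current task's memcg. */
		return memcg_kmem_get_cache(s, flags);
	}
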
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -115,7 +115,8 @@
 #define BYTES_PER_POINTER	sizeof(void *)
 /* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+					   __GFP_NOACCOUNT)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 				 __GFP_NOWARN)