Commit f0432d15 authored by David Rientjes, committed by Linus Torvalds

mm, mempolicy: remove per-process flag

PF_MEMPOLICY is an unnecessary optimization for CONFIG_SLAB users.
There's no significant performance degradation from checking
current->mempolicy rather than current->flags & PF_MEMPOLICY in the
allocation path, especially since the branch is considered unlikely().
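
Concretely, the hot-path test in __do_cache_alloc() changes like this (a
condensed excerpt of the mm/slab.c hunk further below):

	/* before: a flag bit that had to be kept in sync with task->mempolicy */
	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
		objp = alternate_node_alloc(cache, flags);

	/* after: test the mempolicy pointer directly */
	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB))
		objp = alternate_node_alloc(cache, flags);

Either way the check is a load and test of task_struct fields that are
already cache-hot, which is why the netperf numbers below come out as a
wash.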

Running TCP_RR with netperf-2.4.5 through localhost on a 16-CPU machine
with 64GB of memory and without a mempolicy:

	threads		before		after
	16		1249409		1244487
	32		1281786		1246783
	48		1239175		1239138
	64		1244642		1241841
	80		1244346		1248918
	96		1266436		1254316
	112		1307398		1312135
	128		1327607		1326502

Per-process flags are a scarce resource, so we should free them up
whenever possible and make them available for reuse.  We'll be using this
bit shortly for memcg oom reserves.
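
For background on the scarcity: each PF_* value is a single bit in the
32-bit task_struct->flags word, so at most 32 can ever exist, and nearly
all are already taken (see the sched.h hunk below).  The pattern they
support is plain bit set/test/clear, e.g. as a freestanding userspace
sketch (illustrative only, not kernel code):

	#include <stdio.h>

	#define PF_MEMPOLICY	0x10000000	/* bit 28, the bit this patch frees */

	int main(void)
	{
		unsigned int flags = 0;

		flags |= PF_MEMPOLICY;		/* set */
		if (flags & PF_MEMPOLICY)	/* test */
			printf("flag is set\n");
		flags &= ~PF_MEMPOLICY;		/* clear */
		return 0;
	}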
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Tim Hockin <thockin@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2a389610
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -143,7 +143,6 @@ extern void numa_policy_init(void);
 extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
 				enum mpol_rebind_step step);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
-extern void mpol_fix_fork_child_flag(struct task_struct *p);
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 				unsigned long addr, gfp_t gfp_flags,
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1851,7 +1851,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
-#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_SUSPEND_TASK	0x80000000	/* this thread called freeze_processes and should not be frozen */
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1276,7 +1276,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_threadgroup_lock;
 	}
-	mpol_fix_fork_child_flag(p);
 #endif
 #ifdef CONFIG_CPUSETS
 	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -795,36 +795,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 	return err;
 }
 
-/*
- * Update task->flags PF_MEMPOLICY bit: set iff non-default
- * mempolicy.  Allows more rapid checking of this (combined perhaps
- * with other PF_* flag bits) on memory allocation hot code paths.
- *
- * If called from outside this file, the task 'p' should -only- be
- * a newly forked child not yet visible on the task list, because
- * manipulating the task flags of a visible task is not safe.
- *
- * The above limitation is why this routine has the funny name
- * mpol_fix_fork_child_flag().
- *
- * It is also safe to call this with a task pointer of current,
- * which the static wrapper mpol_set_task_struct_flag() does,
- * for use within this file.
- */
-
-void mpol_fix_fork_child_flag(struct task_struct *p)
-{
-	if (p->mempolicy)
-		p->flags |= PF_MEMPOLICY;
-	else
-		p->flags &= ~PF_MEMPOLICY;
-}
-
-static void mpol_set_task_struct_flag(void)
-{
-	mpol_fix_fork_child_flag(current);
-}
-
 /* Set the process memory policy */
 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 			     nodemask_t *nodes)
@@ -861,7 +831,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 	}
 	old = current->mempolicy;
 	current->mempolicy = new;
-	mpol_set_task_struct_flag();
 	if (new && new->mode == MPOL_INTERLEAVE &&
 	    nodes_weight(new->v.nodes))
 		current->il_next = first_node(new->v.nodes);
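
The removed comment's warning that manipulating the flags of a visible
task "is not safe" refers to task->flags being updated with a plain,
non-atomic read-modify-write; roughly:

	p->flags |= PF_MEMPOLICY;
	/* compiles to: tmp = p->flags; tmp |= PF_MEMPOLICY; p->flags = tmp;
	 * if the task concurrently updates its own p->flags between the
	 * load and the store, one of the two updates is lost */

With the flag gone, do_set_mempolicy() only writes current->mempolicy and
that question disappears entirely.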
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3027,7 +3027,7 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
+ * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3259,7 +3259,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
 	void *objp;
 
-	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
 		objp = alternate_node_alloc(cache, flags);
 		if (objp)
 			goto out;
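
For reference, the unlikely() seen in the slab hunk is the kernel's
branch-prediction hint from include/linux/compiler.h, defined roughly as:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

It only influences code layout and static branch prediction, never
semantics, so checking current->mempolicy unannotated while keeping the
hint on the PF_SPREAD_SLAB test is purely a codegen choice.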