Commit a1bc5a4e authored by David Rientjes, committed by Linus Torvalds

cpusets: replace zone allowed functions with node allowed

The cpuset_zone_allowed() variants are actually only a function of the
zone's node.

Cc: Paul Menage <menage@google.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7f81b1ae
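
The whole change reduces to one pattern: resolve a zone to its node once, then do all cpuset checking per node. A condensed sketch of that pattern, assuming the definitions in the diff below (this is not the verbatim kernel code; the hardwall variant, omitted here, has the same shape):

	/*
	 * Condensed sketch of the refactor (softwall side only).
	 * The zone-level query is only a function of the zone's node, so it
	 * becomes a trivial wrapper around the node-level primitive.
	 */
	#include <linux/cpuset.h>	/* number_of_cpusets, gfp_t */
	#include <linux/mm.h>		/* zone_to_nid() */

	extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);

	static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
	{
		/* Fast path: with at most one cpuset, every node is allowed. */
		return number_of_cpusets <= 1 ||
			__cpuset_node_allowed_softwall(node, gfp_mask);
	}

	static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
	{
		/* A zone inherits its placement constraint from its node. */
		return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
	}

Existing zone-based callers compile unchanged, while node-aware callers can skip the zone lookup entirely.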
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
 #include <linux/cgroup.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_CPUSETS
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
-extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
-extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
 
-static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_softwall(z, gfp_mask);
+		__cpuset_node_allowed_softwall(node, gfp_mask);
 }
 
-static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
 	return number_of_cpusets <= 1 ||
-		__cpuset_zone_allowed_hardwall(z, gfp_mask);
+		__cpuset_node_allowed_hardwall(node, gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+{
+	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
 }
 
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 	return 1;
 }
 
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+{
+	return 1;
+}
+
 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 {
 	return 1;
...
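
One payoff of the node-based entry points: callers that already operate on node ids no longer need a struct zone in hand. A hypothetical call site (alloc_pages_on_allowed_node() is illustrative and not part of this commit; alloc_pages_node() is the stock kernel per-node allocator):

	/* Illustrative only: a sleepable caller that starts from a node id. */
	static struct page *alloc_pages_on_allowed_node(int nid, gfp_t gfp_mask,
							unsigned int order)
	{
		/*
		 * The softwall check may scan up ancestor cpusets and sleep,
		 * so it is only safe here because this caller may sleep too.
		 */
		if (!cpuset_node_allowed_softwall(nid, gfp_mask))
			return NULL;	/* nid is outside this task's cpuset */

		return alloc_pages_node(nid, gfp_mask, order);
	}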
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2181,26 +2181,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 }
 
 /**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.  If
- * __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.  If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
  * Otherwise, no.
  *
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
  *
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2239,20 +2237,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
  *	GFP_USER     - only nodes in current tasks mems allowed ok.
  *
  * Rule:
- *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
  *    the code that might scan up ancestor cpusets and sleep.
  */
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
@@ -2281,15 +2276,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 }
 
 /*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
- * z's node is in our tasks mems_allowed, yes.   If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes.  Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
+ * set, yes, we can always allocate.  If node is in our task's mems_allowed,
+ * yes.  If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2297,20 +2292,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
  * any node on the zonelist except the first.  By the time any such
  * calls get to this routine, we should just shut up and say 'yes'.
  *
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
  * mems_allowed or that we're in interrupt.  It does not scan up the
  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
  * It never sleeps.
  */
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
-
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 
 	/*
...
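
The zone-based wrappers keep existing zonelist scans working unchanged. A minimal sketch of such a caller, in the spirit of the page allocator's zonelist walk (simplified and hypothetical; first_allowed_zone() is not a real kernel function):

	/* Hypothetical helper: find the first zone this task may allocate from. */
	static struct zone *first_allowed_zone(struct zone **zones, gfp_t gfp_mask)
	{
		int i;

		for (i = 0; zones[i] != NULL; i++) {
			/*
			 * Internally this is now zone_to_nid() plus the node
			 * check.  Per the rule in the comment above, callers
			 * that cannot sleep must set __GFP_HARDWALL (or use
			 * the hardwall variant), since softwall may sleep.
			 */
			if (cpuset_zone_allowed_softwall(zones[i], gfp_mask))
				return zones[i];
		}
		return NULL;	/* every candidate zone is on a disallowed node */
	}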