Commit db7f47cf authored by Paul Menage, committed by Linus Torvalds

cpusets: allow cpusets to be configured/built on non-SMP systems

Allow cpusets to be configured/built on non-SMP systems

Currently it's impossible to build cpusets under UML on x86-64, since
cpusets depends on SMP and x86-64 UML doesn't support SMP.

There's code in cpusets that doesn't depend on SMP.  This patch surrounds
the minimum amount of cpusets code with #ifdef CONFIG_SMP in order to
allow cpusets to build/run on UP systems (for testing purposes under UML).
Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Paul Menage <menage@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a1bc5a4e
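
For context, a minimal, standalone sketch of the #ifdef CONFIG_SMP pattern the patch applies: the SMP build keeps the real routine, while the UP build substitutes a trivial stub with the same signature so callers compile unchanged. The names below (build_domains, placeholder) are illustrative only, not the kernel's actual symbols, and the snippet is ordinary user-space C rather than kernel code.

/*
 * Sketch of the compile-time switch used by this patch: one function name,
 * two bodies selected by a configuration macro.
 */
#include <stdio.h>

#ifdef CONFIG_SMP
/* "SMP" build: stand-in for the real domain-partitioning work. */
static int build_domains(void **domains)
{
        static int placeholder;     /* stand-in for real domain data */

        *domains = &placeholder;
        return 2;                   /* pretend two domains were found */
}
#else /* !CONFIG_SMP */
/* "UP" build: report a single trivial partition; nothing to compute. */
static int build_domains(void **domains)
{
        *domains = NULL;
        return 1;
}
#endif /* CONFIG_SMP */

int main(void)
{
        void *domains;
        int ndoms = build_domains(&domains);

        /* The caller is identical in both configurations. */
        printf("built %d scheduler domain(s)\n", ndoms);
        return 0;
}

Compiling with -DCONFIG_SMP selects the first variant; compiling without it selects the UP stub, mirroring how the kernel build picks code paths from its configuration while leaving every call site untouched.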
init/Kconfig

@@ -531,7 +531,7 @@ config CGROUP_DEVICE
 
 config CPUSETS
 	bool "Cpuset support"
-	depends on SMP && CGROUPS
+	depends on CGROUPS
 	help
 	  This option will let you create and manage CPUSETs which
 	  allow dynamically partitioning a system into sets of CPUs and
kernel/cpuset.c

@@ -517,6 +517,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Helper routine for generate_sched_domains().
  * Do cpusets a, b have overlapping cpus_allowed masks?
@@ -811,6 +812,18 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
 	put_online_cpus();
 }
+#else /* !CONFIG_SMP */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+}
+
+static int generate_sched_domains(struct cpumask **domains,
+			struct sched_domain_attr **attributes)
+{
+	*domains = NULL;
+	return 1;
+}
+#endif /* CONFIG_SMP */
 
 static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
@@ -1164,8 +1177,10 @@ int current_cpuset_is_being_rebound(void)
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
+#ifdef CONFIG_SMP
 	if (val < -1 || val >= SD_LV_MAX)
 		return -EINVAL;
+#endif
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;