Commit 702a7c76 authored by Linus Torvalds

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (21 commits)
  sched: Remove forced2_migrations stats
  sched: Fix memory leak in two error corner cases
  sched: Fix build warning in get_update_sysctl_factor()
  sched: Update normalized values on user updates via proc
  sched: Make tunable scaling style configurable
  sched: Fix missing sched tunable recalculation on cpu add/remove
  sched: Fix task priority bug
  sched: cgroup: Implement different treatment for idle shares
  sched: Remove unnecessary RCU exclusion
  sched: Discard some old bits
  sched: Clean up check_preempt_wakeup()
  sched: Move update_curr() in check_preempt_wakeup() to avoid redundant call
  sched: Sanitize fork() handling
  sched: Clean up ttwu() rq locking
  sched: Remove rq->clock coupling from set_task_cpu()
  sched: Consolidate select_task_rq() callers
  sched: Remove sysctl.sched_features
  sched: Protect sched_rr_get_param() access to task->sched_class
  sched: Protect task->cpus_allowed access in sched_getaffinity()
  sched: Fix balance vs hotplug race
  ...

Fixed up conflicts in kernel/sysctl.c (due to sysctl cleanup)
parents 053fe57a b9889ed1
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	cpumask_weight(cpu_online_mask)
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
+#define num_active_cpus()	cpumask_weight(cpu_active_mask)
 #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
 #define num_present_cpus()	1
+#define num_active_cpus()	1
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
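
The new num_active_cpus() mirrors the three existing counters but over cpu_active_mask, the set of CPUs the scheduler is still allowed to balance onto. A minimal userspace sketch of the mask/weight relationship (a hypothetical standalone model, not kernel code):

#include <stdio.h>

/* Model each CPU mask as a 64-bit word; cpumask_weight() is a popcount. */
typedef unsigned long long cpumask_t;

static int weight(cpumask_t m) { return __builtin_popcountll(m); }

int main(void)
{
	cpumask_t possible = 0xF; /* CPUs 0-3 exist */
	cpumask_t online   = 0xF; /* all four are booted */
	cpumask_t active   = 0x7; /* CPU 3 cleared first while going down */

	/* active is a subset of online, which is a subset of possible */
	printf("possible=%d online=%d active=%d\n",
	       weight(possible), weight(online), weight(active));
	return 0;
}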
......
@@ -1102,7 +1102,7 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_fork) (struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1111,7 +1111,8 @@ struct sched_class {
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
 
-	unsigned int (*get_rr_interval) (struct task_struct *task);
+	unsigned int (*get_rr_interval) (struct rq *rq,
+					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*moved_group) (struct task_struct *p);
@@ -1151,8 +1152,6 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
-	u64			avg_running;
-
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1175,7 +1174,6 @@ struct sched_entity {
 	u64			nr_failed_migrations_running;
 	u64			nr_failed_migrations_hot;
 	u64			nr_forced_migrations;
-	u64			nr_forced2_migrations;
 	u64			nr_wakeups;
 	u64			nr_wakeups_sync;
@@ -1904,14 +1902,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_shares_ratelimit;
 extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+	SCHED_TUNABLESCALING_NONE,
+	SCHED_TUNABLESCALING_LOG,
+	SCHED_TUNABLESCALING_LINEAR,
+	SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
 #ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
 		loff_t *ppos);
 #endif
......
@@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		set_cpu_active(cpu, true);
+
 		nr_calls--;
 		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					  hcpu, nr_calls, NULL);
@@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	/* Ensure that we are not runnable on dying cpu */
 	cpumask_copy(old_allowed, &current->cpus_allowed);
-	set_cpus_allowed_ptr(current,
-			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
+	set_cpus_allowed_ptr(current, cpu_active_mask);
 
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
+		set_cpu_active(cpu, true);
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
@@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
 	err = _cpu_down(cpu, 0);
 
-	if (cpu_online(cpu))
-		set_cpu_active(cpu, true);
-
 out:
 	cpu_maps_update_done();
 	stop_machine_destroy();
@@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
+
+	for_each_online_cpu(cpu) {
+		if (cpu == first_cpu)
+			continue;
+		set_cpu_active(cpu, false);
+	}
+
+	synchronize_sched();
+
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
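
The hotplug fix moves the "deactivate" step ahead of the actual teardown and re-activates on every failure path: a CPU is first removed from cpu_active_mask, synchronize_sched() waits out in-flight scheduler sections that may still see the old mask, and only then is the CPU taken down. A single-threaded sketch of that ordering and its rollback (a hypothetical model; take_down() and grace_period() stand in for __stop_machine() and synchronize_sched()):

#include <stdbool.h>
#include <stdio.h>

static bool cpu_active[4] = { true, true, true, true };

static void grace_period(void) { /* stand-in for synchronize_sched() */ }

static int take_down(int cpu) { return cpu == 2 ? -1 : 0; } /* pretend CPU 2 refuses */

static int cpu_down_model(int cpu)
{
	cpu_active[cpu] = false;        /* 1. stop the scheduler targeting this CPU */
	grace_period();                 /* 2. wait until everyone sees the change */

	if (take_down(cpu)) {           /* 3. the teardown itself can still fail... */
		cpu_active[cpu] = true; /* ...so roll the mask back immediately */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("cpu1: %d active=%d\n", cpu_down_model(1), cpu_active[1]);
	printf("cpu2: %d active=%d\n", cpu_down_model(2), cpu_active[2]);
	return 0;
}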
......
@@ -737,7 +737,7 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 }
 
-static int generate_sched_domains(struct cpumask **domains,
+static int generate_sched_domains(cpumask_var_t **domains,
 			struct sched_domain_attr **attributes)
 {
 	*domains = NULL;
@@ -872,7 +872,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 		if (retval < 0)
 			return retval;
 
-		if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
+		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 			return -EINVAL;
 	}
 	retval = validate_change(cs, trialcs);
@@ -2010,7 +2010,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		}
 
 		/* Continue past cpusets with all cpus, mems online */
-		if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
+		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
@@ -2019,7 +2019,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
 		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
-			    cpu_online_mask);
+			    cpu_active_mask);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 				node_states[N_HIGH_MEMORY]);
 		mutex_unlock(&callback_mutex);
@@ -2057,8 +2057,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 	switch (phase) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		break;
 
 	default:
@@ -2067,7 +2069,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 	cgroup_lock();
 	mutex_lock(&callback_mutex);
-	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 	mutex_unlock(&callback_mutex);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);
@@ -2114,7 +2116,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 void __init cpuset_init_smp(void)
 {
-	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
......
[The diff for one file is collapsed in the original view.]
@@ -309,6 +309,12 @@ static void print_cpu(struct seq_file *m, int cpu)
 	print_rq(m, rq, cpu);
 }
 
+static const char *sched_tunable_scaling_names[] = {
+	"none",
+	"logaritmic",
+	"linear"
+};
+
 static int sched_debug_show(struct seq_file *m, void *v)
 {
 	u64 now = ktime_to_ns(ktime_get());
@@ -334,6 +340,10 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #undef PN
 #undef P
 
+	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
+		sysctl_sched_tunable_scaling,
+		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
+
 	for_each_online_cpu(cpu)
 		print_cpu(m, cpu);
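
With the default SCHED_TUNABLESCALING_LOG (value 1), the new /proc/sched_debug line would render roughly as below (illustrative only; the %-40s format pads the name to 40 columns, and "logaritmic" is the array's own spelling):

  .sysctl_sched_tunable_scaling           : 1 (logaritmic)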
@@ -399,7 +409,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
 	PN(se.avg_wakeup);
-	PN(se.avg_running);
 
 	nr_switches = p->nvcsw + p->nivcsw;
@@ -423,7 +432,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.nr_failed_migrations_running);
 	P(se.nr_failed_migrations_hot);
 	P(se.nr_forced_migrations);
-	P(se.nr_forced2_migrations);
 	P(se.nr_wakeups);
 	P(se.nr_wakeups_sync);
 	P(se.nr_wakeups_migrate);
@@ -499,7 +507,6 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.nr_failed_migrations_running	= 0;
 	p->se.nr_failed_migrations_hot		= 0;
 	p->se.nr_forced_migrations		= 0;
-	p->se.nr_forced2_migrations		= 0;
 	p->se.nr_wakeups			= 0;
 	p->se.nr_wakeups_sync			= 0;
 	p->se.nr_wakeups_migrate		= 0;
......
@@ -21,6 +21,7 @@
  */
 
 #include <linux/latencytop.h>
+#include <linux/sched.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -35,12 +36,26 @@
  * run vmstat and monitor the context-switches (cs) field)
  */
 unsigned int sysctl_sched_latency = 5000000ULL;
+unsigned int normalized_sysctl_sched_latency = 5000000ULL;
+
+/*
+ * The initial- and re-scaling of tunables is configurable
+ * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ *
+ * Options are:
+ * SCHED_TUNABLESCALING_NONE - unscaled, always *1
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
+ */
+enum sched_tunable_scaling sysctl_sched_tunable_scaling
+	= SCHED_TUNABLESCALING_LOG;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
 unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
@@ -70,6 +85,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
  * have immediate wakeup/sleep latencies.
  */
 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
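
The three scaling modes map the CPU count onto a multiplier for the normalized tunables. A standalone model of the factor selection, assuming the log variant uses 1 + ilog2(ncpus) exactly as the comment above describes (the kernel's own helper, get_update_sysctl_factor(), lives in the part of the diff that is collapsed here):

#include <stdio.h>

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
};

/* integer log2: position of the highest set bit */
static unsigned int ilog2_u(unsigned int x)
{
	unsigned int r = 0;
	while (x >>= 1)
		r++;
	return r;
}

static unsigned int scaling_factor(enum sched_tunable_scaling mode,
				   unsigned int ncpus)
{
	switch (mode) {
	case SCHED_TUNABLESCALING_LINEAR:
		return ncpus;               /* *ncpus */
	case SCHED_TUNABLESCALING_LOG:
		return 1 + ilog2_u(ncpus);  /* *(1 + ilog(ncpus)) */
	case SCHED_TUNABLESCALING_NONE:
	default:
		return 1;                   /* unscaled */
	}
}

int main(void)
{
	/* e.g. effective sched_latency = normalized value (5 ms) * factor */
	for (unsigned int cpus = 1; cpus <= 16; cpus *= 2)
		printf("%2u cpus -> log factor %u, linear factor %u\n",
		       cpus, scaling_factor(SCHED_TUNABLESCALING_LOG, cpus),
		       scaling_factor(SCHED_TUNABLESCALING_LINEAR, cpus));
	return 0;
}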
@@ -383,11 +399,12 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  */
 #ifdef CONFIG_SCHED_DEBUG
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	int factor = get_update_sysctl_factor();
 
 	if (ret || !write)
 		return ret;
@@ -395,6 +412,14 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
 					sysctl_sched_min_granularity);
 
+#define WRT_SYSCTL(name) \
+	(normalized_sysctl_##name = sysctl_##name / (factor))
+	WRT_SYSCTL(sched_min_granularity);
+	WRT_SYSCTL(sched_latency);
+	WRT_SYSCTL(sched_wakeup_granularity);
+	WRT_SYSCTL(sched_shares_ratelimit);
+#undef WRT_SYSCTL
+
 	return 0;
 }
 #endif
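
WRT_SYSCTL uses token pasting to keep each normalized_sysctl_* shadow in sync: on a user write, the normalized value is re-derived by dividing the raw sysctl by the current factor, so a later CPU add/remove can rescale from an unpolluted base. A compilable sketch of the same ## idiom (hypothetical variable names):

#include <stdio.h>

static unsigned int sysctl_demo_latency = 15000000; /* user wrote 15 ms */
static unsigned int normalized_sysctl_demo_latency;
static unsigned int factor = 3;                     /* e.g. 1 + ilog2(4 cpus) */

/* same shape as the kernel's WRT_SYSCTL(): paste the name into both sides */
#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))

int main(void)
{
	WRT_SYSCTL(demo_latency);
	printf("raw=%u normalized=%u\n",
	       sysctl_demo_latency, normalized_sysctl_demo_latency);
	return 0;
}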
@@ -1403,7 +1428,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 			new_cpu = prev_cpu;
 	}
 
-	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
 		/*
 		 * If power savings logic is enabled for a domain, see if we
@@ -1484,10 +1508,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 			update_shares(tmp);
 	}
 
-	if (affine_sd && wake_affine(affine_sd, p, sync)) {
-		new_cpu = cpu;
-		goto out;
-	}
+	if (affine_sd && wake_affine(affine_sd, p, sync))
+		return cpu;
 
 	while (sd) {
 		int load_idx = sd->forkexec_idx;
@@ -1528,8 +1550,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 		/* while loop will break here if sd == NULL */
 	}
 
-out:
-	rcu_read_unlock();
 	return new_cpu;
 }
 #endif /* CONFIG_SMP */
@@ -1651,12 +1671,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	int sync = wake_flags & WF_SYNC;
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	update_curr(cfs_rq);
-
-	if (unlikely(rt_prio(p->prio))) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(rt_prio(p->prio)))
+		goto preempt;
 
 	if (unlikely(p->sched_class != &fair_sched_class))
 		return;
@@ -1682,50 +1698,44 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 		return;
 
 	/* Idle tasks are by definition preempted by everybody. */
-	if (unlikely(curr->policy == SCHED_IDLE)) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(curr->policy == SCHED_IDLE))
+		goto preempt;
 
-	if ((sched_feat(WAKEUP_SYNC) && sync) ||
-	    (sched_feat(WAKEUP_OVERLAP) &&
-	     (se->avg_overlap < sysctl_sched_migration_cost &&
-	      pse->avg_overlap < sysctl_sched_migration_cost))) {
-		resched_task(curr);
-		return;
-	}
+	if (sched_feat(WAKEUP_SYNC) && sync)
+		goto preempt;
 
-	if (sched_feat(WAKEUP_RUNNING)) {
-		if (pse->avg_running < se->avg_running) {
-			set_next_buddy(pse);
-			resched_task(curr);
-			return;
-		}
-	}
+	if (sched_feat(WAKEUP_OVERLAP) &&
+	    se->avg_overlap < sysctl_sched_migration_cost &&
+	    pse->avg_overlap < sysctl_sched_migration_cost)
+		goto preempt;
 
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
+	update_curr(cfs_rq);
 	find_matching_se(&se, &pse);
 	BUG_ON(!pse);
+	if (wakeup_preempt_entity(se, pse) == 1)
+		goto preempt;
 
-	if (wakeup_preempt_entity(se, pse) == 1) {
-		resched_task(curr);
-		/*
-		 * Only set the backward buddy when the current task is still
-		 * on the rq. This can happen when a wakeup gets interleaved
-		 * with schedule on the ->pre_schedule() or idle_balance()
-		 * point, either of which can * drop the rq lock.
-		 *
-		 * Also, during early boot the idle thread is in the fair class,
-		 * for obvious reasons its a bad idea to schedule back to it.
-		 */
-		if (unlikely(!se->on_rq || curr == rq->idle))
-			return;
-		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
-			set_last_buddy(se);
-	}
+	return;
+
+preempt:
+	resched_task(curr);
+	/*
+	 * Only set the backward buddy when the current task is still
+	 * on the rq. This can happen when a wakeup gets interleaved
+	 * with schedule on the ->pre_schedule() or idle_balance()
+	 * point, either of which can * drop the rq lock.
+	 *
+	 * Also, during early boot the idle thread is in the fair class,
+	 * for obvious reasons its a bad idea to schedule back to it.
+	 */
+	if (unlikely(!se->on_rq || curr == rq->idle))
+		return;
+
+	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+		set_last_buddy(se);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
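
The rewrite collapses four separate resched_task()-plus-return sequences into a single preempt: label and defers update_curr() until a preemption decision actually needs an up-to-date vruntime. The control-flow pattern in a compilable miniature (hypothetical conditions):

#include <stdbool.h>
#include <stdio.h>

static void resched(void) { puts("reschedule current task"); }

/* Before: every preempting branch duplicated resched()+return.
 * After: branches that preempt jump to one shared exit label. */
static void check_preempt_model(bool rt_wakeup, bool curr_is_idle, bool sync_hint)
{
	if (rt_wakeup)
		goto preempt;
	if (curr_is_idle)
		goto preempt;
	if (sync_hint)
		goto preempt;

	return;            /* no preemption */

preempt:
	resched();         /* the one shared preemption path */
}

int main(void)
{
	check_preempt_model(false, true, false);
	return 0;
}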
@@ -1905,6 +1915,17 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
+static void rq_online_fair(struct rq *rq)
+{
+	update_sysctl();
+}
+
+static void rq_offline_fair(struct rq *rq)
+{
+	update_sysctl();
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -1922,28 +1943,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 }
 
 /*
- * Share the fairness runtime between parent and child, thus the
- * total amount of pressure for CPU stays equal - new tasks
- * get a chance to run but frequent forkers are not allowed to
- * monopolize the CPU. Note: the parent runqueue is locked,
- * the child is not running yet.
+ * called on fork with the child task as argument from the parent's context
+ *  - child not yet on the tasklist
+ *  - preemption disabled
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_fork_fair(struct task_struct *p)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct cfs_rq *cfs_rq = task_cfs_rq(current);
 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
 	int this_cpu = smp_processor_id();
+	struct rq *rq = this_rq();
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
 
-	sched_info_queued(p);
+	if (unlikely(task_cpu(p) != this_cpu))
+		__set_task_cpu(p, this_cpu);
 
 	update_curr(cfs_rq);
 
 	if (curr)
 		se->vruntime = curr->vruntime;
 	place_entity(cfs_rq, se, 1);
 
-	/* 'curr' will be NULL if the child belongs to a different group */
-	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && entity_before(curr, se)) {
+	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
@@ -1952,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p)
 		resched_task(rq->curr);
 	}
 
-	enqueue_task_fair(rq, p, 0);
+	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
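
entity_before(), used in the child-runs-first check, orders tasks by virtual runtime; since vruntimes are u64 counters that may wrap, CFS compares them via a signed difference rather than a plain less-than. A standalone sketch of that idiom (the entity reduced to the one field that matters here):

#include <stdint.h>
#include <stdio.h>

struct entity { uint64_t vruntime; };

/* wraparound-safe "a is scheduled before b": compare via signed difference */
static int entity_before(const struct entity *a, const struct entity *b)
{
	return (int64_t)(a->vruntime - b->vruntime) < 0;
}

int main(void)
{
	struct entity parent = { UINT64_MAX - 5 }; /* counter about to wrap */
	struct entity child  = { 10 };             /* counter already wrapped */

	/* a naive parent.vruntime < child.vruntime would answer 0 here;
	 * the signed-difference form still orders the entities correctly */
	printf("parent before child: %d\n", entity_before(&parent, &child));
	return 0;
}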
@@ -2014,21 +2037,17 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
-unsigned int get_rr_interval_fair(struct task_struct *task)
+unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
 {
 	struct sched_entity *se = &task->se;
-	unsigned long flags;
-	struct rq *rq;
 	unsigned int rr_interval = 0;
 
 	/*
 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
 	 * idle runqueue:
 	 */
-	rq = task_rq_lock(task, &flags);
 	if (rq->cfs.load.weight)
 		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-	task_rq_unlock(rq, &flags);
 
 	return rr_interval;
 }
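
All three get_rr_interval implementations (fair here, idle and rt below) drop their private task_rq_lock() and take the rq from the caller instead; per the "Protect sched_rr_get_param() access to task->sched_class" commit in the list above, the caller now holds the lock once around both the sched_class dereference and the callback. A hedged userspace analogue of that calling convention (not the verbatim kernel code):

#include <pthread.h>
#include <stdio.h>

/* One lock held across both the class-pointer read and the method call,
 * so the task's class cannot change between the two. */

struct sched_class_like {
	unsigned int (*get_rr_interval)(void *rq, int task);
};

static unsigned int fair_rr(void *rq, int task)
{
	(void)rq; (void)task;
	return 100; /* pretend time slice, in jiffies */
}

static const struct sched_class_like fair_class = { fair_rr };
static const struct sched_class_like *task_class = &fair_class;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int read_rr_interval(int task)
{
	unsigned int slice;

	pthread_mutex_lock(&rq_lock);   /* lock hoisted into the caller */
	slice = task_class->get_rr_interval(NULL, task);
	pthread_mutex_unlock(&rq_lock);
	return slice;
}

int main(void)
{
	printf("rr interval: %u\n", read_rr_interval(0));
	return 0;
}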
@@ -2052,11 +2071,13 @@ static const struct sched_class fair_sched_class = {
 	.load_balance		= load_balance_fair,
 	.move_one_task		= move_one_task_fair,
 
+	.rq_online		= rq_online_fair,
+	.rq_offline		= rq_offline_fair,
 #endif
 
 	.set_curr_task          = set_curr_task_fair,
 	.task_tick		= task_tick_fair,
-	.task_new		= task_new_fair,
+	.task_fork		= task_fork_fair,
 
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
......
@@ -53,11 +53,6 @@ SCHED_FEAT(WAKEUP_SYNC, 0)
  */
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
 
-/*
- * Wakeup preemption towards tasks that run short
- */
-SCHED_FEAT(WAKEUP_RUNNING, 0)
-
 /*
  * Use the SYNC wakeup hint, pipes and the likes use this to indicate
  * the remote end is likely to consume the data we just wrote, and
......
@@ -97,7 +97,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 	check_preempt_curr(rq, p, 0);
 }
 
-unsigned int get_rr_interval_idle(struct task_struct *task)
+unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
 {
 	return 0;
 }
......
@@ -1721,7 +1721,7 @@ static void set_curr_task_rt(struct rq *rq)
 		dequeue_pushable_task(rq, p);
 }
 
-unsigned int get_rr_interval_rt(struct task_struct *task)
+unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks
......
@@ -244,6 +244,10 @@ static int min_sched_granularity_ns = 100000;		/* 100 usecs */
 static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
 static int min_wakeup_granularity_ns;			/* 0 usecs */
 static int max_wakeup_granularity_ns = NSEC_PER_SEC;	/* 1 second */
+static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
+static int min_sched_shares_ratelimit = 100000; /* 100 usec */
+static int max_sched_shares_ratelimit = NSEC_PER_SEC; /* 1 second */
 #endif
 
 static struct ctl_table kern_table[] = {
@@ -260,7 +264,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_sched_min_granularity,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= sched_nr_latency_handler,
+		.proc_handler	= sched_proc_update_handler,
 		.extra1		= &min_sched_granularity_ns,
 		.extra2		= &max_sched_granularity_ns,
 	},
@@ -269,7 +273,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_sched_latency,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= sched_nr_latency_handler,
+		.proc_handler	= sched_proc_update_handler,
 		.extra1		= &min_sched_granularity_ns,
 		.extra2		= &max_sched_granularity_ns,
 	},
@@ -278,7 +282,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_sched_wakeup_granularity,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= sched_proc_update_handler,
 		.extra1		= &min_wakeup_granularity_ns,
 		.extra2		= &max_wakeup_granularity_ns,
 	},
@@ -287,7 +291,18 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_sched_shares_ratelimit,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= sched_proc_update_handler,
+		.extra1		= &min_sched_shares_ratelimit,
+		.extra2		= &max_sched_shares_ratelimit,
+	},
+	{
+		.procname	= "sched_tunable_scaling",
+		.data		= &sysctl_sched_tunable_scaling,
+		.maxlen		= sizeof(enum sched_tunable_scaling),
+		.mode		= 0644,
+		.proc_handler	= sched_proc_update_handler,
+		.extra1		= &min_sched_tunable_scaling,
+		.extra2		= &max_sched_tunable_scaling,
 	},
 	{
 		.procname	= "sched_shares_thresh",
@@ -297,13 +312,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
 	},
-	{
-		.procname	= "sched_features",
-		.data		= &sysctl_sched_features,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 	{
 		.procname	= "sched_migration_cost",
 		.data		= &sysctl_sched_migration_cost,
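
Since these entries live in kern_table, the new knob surfaces as /proc/sys/kernel/sched_tunable_scaling, bounded to 0..SCHED_TUNABLESCALING_END-1 by the minmax check inside the handler. A small userspace probe (assuming the standard /proc/sys mapping; writing requires root):

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_tunable_scaling";
	FILE *f = fopen(path, "r");
	int mode;

	if (!f || fscanf(f, "%d", &mode) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);

	/* 0 = none, 1 = log (default), 2 = linear, per the enum above */
	printf("sched_tunable_scaling = %d\n", mode);

	/* a write outside 0..2, e.g. "echo 3 > ...", is rejected with EINVAL
	 * by the bounds wired through .extra1/.extra2 */
	return 0;
}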
......