Commit aac3d396 authored by Linus Torvalds

Merge branch 'sched-fixes-for-linus' of...

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix broken assertion
  sched: Assert task state bits at build time
  sched: Update task_state_array with new states
  sched: Add missing state chars to TASK_STATE_TO_CHAR_STR
  sched: Move TASK_STATE_TO_CHAR_STR near the TASK_state bits
  sched: Teach might_sleep() about preemptible RCU
  sched: Make warning less noisy
  sched: Simplify set_task_cpu()
  sched: Remove the cfs_rq dependency from set_task_cpu()
  sched: Add pre and post wakeup hooks
  sched: Move kthread_bind() back to kthread.c
  sched: Fix select_task_rq() vs hotplug issues
  sched: Fix sched_exec() balancing
  sched: Ensure set_task_cpu() is never called on blocked tasks
  sched: Use TASK_WAKING for fork wakeups
  sched: Select_task_rq_fair() must honour SD_LOAD_BALANCE
  sched: Fix task_hot() test order
  sched: Fix set_cpu_active() in cpu_down()
  sched: Mark boot-cpu active before smp_init()
  sched: Fix cpu_clock() in NMIs, on !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  ...
parents 10e5453f 077614ee
@@ -138,9 +138,12 @@ static const char *task_state_array[] = {
"S (sleeping)", /* 1 */
"D (disk sleep)", /* 2 */
"T (stopped)", /* 4 */
"T (tracing stop)", /* 8 */
"t (tracing stop)", /* 8 */
"Z (zombie)", /* 16 */
"X (dead)" /* 32 */
"X (dead)", /* 32 */
"x (dead)", /* 64 */
"K (wakekill)", /* 128 */
"W (waking)", /* 256 */
};
static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
const char **p = &task_state_array[0];
BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
while (state) {
p++;
state >>= 1;
......
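The get_task_state() walk above effectively indexes task_state_array by the position of the highest set bit in the reported state word, and the new BUILD_BUG_ON ties the array size to TASK_STATE_MAX. A minimal user-space model of that lookup, assuming the array contents shown in the hunk plus the "R (running)" entry for state 0 from unchanged context:

#include <stdio.h>

/* Mirrors the updated task_state_array from the hunk above. */
static const char *task_state_array[] = {
	"R (running)",		/*   0 */
	"S (sleeping)",		/*   1 */
	"D (disk sleep)",	/*   2 */
	"T (stopped)",		/*   4 */
	"t (tracing stop)",	/*   8 */
	"Z (zombie)",		/*  16 */
	"X (dead)",		/*  32 */
	"x (dead)",		/*  64 */
	"K (wakekill)",		/* 128 */
	"W (waking)",		/* 256 */
};

/* Same walk as get_task_state(): one array step per right shift until the mask is empty. */
static const char *state_to_string(unsigned int state)
{
	const char **p = &task_state_array[0];

	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}

int main(void)
{
	printf("%s\n", state_to_string(0));	/* "R (running)" */
	printf("%s\n", state_to_string(256));	/* "W (waking)"  */
	return 0;
}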
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
{
}
static inline int rcu_preempt_depth(void)
{
return 0;
}
#endif /* __LINUX_RCUTINY_H */
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
extern void synchronize_rcu(void);
extern void exit_rcu(void);
/*
* Defined as macro as it is a very low level header
* included from areas that don't even know about current
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock(void)
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
{
}
static inline int rcu_preempt_depth(void)
{
return 0;
}
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock_bh(void)
......
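The new rcu_preempt_depth() helper lets generic code ask how deeply the current task is nested in preemptible-RCU read-side critical sections (always 0 when TREE_PREEMPT_RCU is not in use). The "sched: Teach might_sleep() about preemptible RCU" commit consumes it in kernel/sched.c, which is not shown among these hunks; the following is only a rough, simplified model of that kind of check, with illustrative names:

#include <stdio.h>

/*
 * Rough model only: a "may we sleep here?" predicate that considers both
 * preemption nesting and preemptible-RCU read-side nesting.  The real
 * check is in might_sleep()/__might_sleep() in kernel/sched.c, not here.
 */
static int may_sleep(int preempt_nesting, int rcu_read_nesting)
{
	return preempt_nesting == 0 && rcu_read_nesting == 0;
}

int main(void)
{
	printf("%d\n", may_sleep(0, 1));	/* 0: inside an RCU read-side section */
	printf("%d\n", may_sleep(0, 0));	/* 1: sleeping is allowed             */
	return 0;
}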
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_STATE_MAX 512
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
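The ___assert_task_state declaration above is a build-time assertion: sizeof(TASK_STATE_TO_CHAR_STR)-1 is the number of state letters (10 for "RSDTtZXxKW") and ilog2(TASK_STATE_MAX)+1 is the number of state values (ilog2(512)+1 = 10); if they ever diverge, the bracketed expression evaluates to -1 and the declaration fails to compile. The same trick outside the kernel, with illustrative macro and symbol names:

/* Declares an array of size 1 when cond is false, size -1 (compile error) when true. */
#define BUILD_ASSERT_NOT(name, cond)	extern char name[1 - 2*!!(cond)]

/* Compiles: "RSDTtZXxKW" really does have 10 characters. */
BUILD_ASSERT_NOT(___demo_state_str_ok, sizeof("RSDTtZXxKW") - 1 != 10);

/* Would be rejected as an array of negative size:
 * BUILD_ASSERT_NOT(___demo_state_str_bad, sizeof("RSDTtZXxKW") - 1 != 9);
 */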
@@ -1091,7 +1097,8 @@ struct sched_class {
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
void (*task_waking) (struct rq *this_rq, struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
@@ -1115,7 +1122,7 @@ struct sched_class {
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*moved_group) (struct task_struct *p);
void (*moved_group) (struct task_struct *p, int on_rq);
#endif
};
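The renamed hooks split the old task_wake_up callback into two points in the wakeup path: task_waking runs early in try_to_wake_up(), before the task is placed on its (possibly new) runqueue, while task_woken runs once the wakeup has completed. The sched_fair.c hunks below use the first to strip the old queue's min_vruntime; the sched_rt.c hunks use the second to push tasks. A small stand-alone sketch of the shape and ordering of the hooks (all names here are illustrative stand-ins, not kernel API):

#include <stdio.h>

/* Minimal stand-ins so the sketch compiles outside the kernel. */
struct rq { int cpu; };
struct task_struct { const char *comm; };

/* Same shape as the two new sched_class hooks. */
struct wakeup_hooks {
	void (*task_waking)(struct rq *rq, struct task_struct *p);
	void (*task_woken)(struct rq *rq, struct task_struct *p);
};

static void demo_waking(struct rq *rq, struct task_struct *p)
{
	printf("task_waking: %s leaving cpu %d\n", p->comm, rq->cpu);
}

static void demo_woken(struct rq *rq, struct task_struct *p)
{
	printf("task_woken: %s enqueued on cpu %d\n", p->comm, rq->cpu);
}

int main(void)
{
	struct rq src = { 0 }, dst = { 1 };
	struct task_struct t = { "demo" };
	struct wakeup_hooks hooks = { demo_waking, demo_woken };

	/* Ordering in the wakeup path: waking first, then CPU selection and
	 * enqueue (elided here), then woken on the destination runqueue. */
	hooks.task_waking(&src, &t);
	hooks.task_woken(&dst, &t);
	return 0;
}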
@@ -2594,8 +2601,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
}
#endif /* CONFIG_MM_OWNER */
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
#endif /* __KERNEL__ */
#endif
@@ -369,12 +369,6 @@ static void __init smp_init(void)
{
unsigned int cpu;
/*
* Set up the current CPU as possible to migrate to.
* The other ones will be done by cpu_up/cpu_down()
*/
set_cpu_active(smp_processor_id(), true);
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
@@ -486,6 +480,7 @@ static void __init boot_cpu_init(void)
int cpu = smp_processor_id();
/* Mark the boot cpu "present", "online" etc for SMP and UP case */
set_cpu_online(cpu, true);
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
}
......
@@ -209,6 +209,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
return -ENOMEM;
cpu_hotplug_begin();
set_cpu_active(cpu, false);
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
@@ -280,18 +281,6 @@ int __ref cpu_down(unsigned int cpu)
goto out;
}
set_cpu_active(cpu, false);
/*
* Make sure all cpus did the reschedule and are not
* using stale version of the cpu_active_mask.
* This is not strictly necessary because stop_machine()
* that we run down the line already provides the required
* synchronization. But it's really a side effect and we do not
* want to depend on the innards of the stop_machine here.
*/
synchronize_sched();
err = _cpu_down(cpu, 0);
out:
@@ -382,19 +371,12 @@ int disable_nonboot_cpus(void)
return error;
cpu_maps_update_begin();
first_cpu = cpumask_first(cpu_online_mask);
/* We take down all of the non-boot CPUs in one shot to avoid races
/*
* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
continue;
set_cpu_active(cpu, false);
}
synchronize_sched();
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
......
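With the hunks above, a CPU is cleared from the active mask inside _cpu_down(), before the CPU_DOWN_PREPARE notifiers run, instead of in cpu_down() with an extra synchronize_sched(). Code that selects a wakeup CPU can therefore filter on the active mask and stop choosing the outgoing CPU early in the teardown. A toy user-space illustration of that kind of filter (all names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define NR_DEMO_CPUS 4

static bool demo_active[NR_DEMO_CPUS] = { true, true, true, true };

/* Pick the first still-active CPU; -1 if none are left. */
static int pick_active_cpu(void)
{
	for (int cpu = 0; cpu < NR_DEMO_CPUS; cpu++)
		if (demo_active[cpu])
			return cpu;
	return -1;
}

int main(void)
{
	demo_active[0] = false;	/* model: CPU 0 cleared early during its teardown */
	printf("selected cpu: %d\n", pick_active_cpu());	/* prints 1 */
	return 0;
}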
@@ -149,6 +149,29 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
}
EXPORT_SYMBOL(kthread_create);
/**
* kthread_bind - bind a just-created kthread to a cpu.
* @p: thread created by kthread_create().
* @cpu: cpu (might not be online, must be possible) for @p to run on.
*
* Description: This function is equivalent to set_cpus_allowed(),
* except that @cpu doesn't need to be online, and the thread must be
* stopped (i.e., just returned from kthread_create()).
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
WARN_ON(1);
return;
}
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
/**
* kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create().
......
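The wait_task_inactive() check above means kthread_bind() has to be called while the new thread is still parked in kthread(), i.e. after kthread_create() and before the thread is first woken. The usual calling pattern looks roughly like this (kernel context assumed; worker_fn and the surrounding variables are illustrative, not part of the hunk):

	struct task_struct *p;
	unsigned int cpu = 1;	/* target CPU: must be possible, need not be online */

	p = kthread_create(worker_fn, NULL, "demo_worker/%u", cpu);
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);	/* thread is still stopped here */
		wake_up_process(p);	/* starts running, pinned to 'cpu' */
	}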
This diff is collapsed.
@@ -236,6 +236,18 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
unsigned long long cpu_clock(int cpu)
{
unsigned long long clock;
unsigned long flags;
local_irq_save(flags);
clock = sched_clock_cpu(cpu);
local_irq_restore(flags);
return clock;
}
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
void sched_clock_init(void)
@@ -251,17 +263,12 @@ u64 sched_clock_cpu(int cpu)
 	return sched_clock();
 }
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 unsigned long long cpu_clock(int cpu)
 {
-	unsigned long long clock;
-	unsigned long flags;
+	return sched_clock_cpu(cpu);
+}
-	local_irq_save(flags);
-	clock = sched_clock_cpu(cpu);
-	local_irq_restore(flags);
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-	return clock;
-}
 EXPORT_SYMBOL_GPL(cpu_clock);
@@ -510,6 +510,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
curr->vruntime += delta_exec_weighted;
update_min_vruntime(cfs_rq);
}
@@ -765,16 +766,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
se->vruntime = vruntime;
}
#define ENQUEUE_WAKEUP 1
#define ENQUEUE_MIGRATE 2
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
/*
* Update the normalized vruntime before updating min_vruntime
* through calling update_curr().
*/
if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
se->vruntime += cfs_rq->min_vruntime;
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
account_entity_enqueue(cfs_rq, se);
if (wakeup) {
if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
}
@@ -828,6 +839,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
update_min_vruntime(cfs_rq);
/*
* Normalize the entity after updating the min_vruntime because the
* update can refer to the ->curr item and we need to reflect this
* movement in our normalized position.
*/
if (!sleep)
se->vruntime -= cfs_rq->min_vruntime;
}
/*
@@ -1038,13 +1057,19 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int flags = 0;
if (wakeup)
flags |= ENQUEUE_WAKEUP;
if (p->state == TASK_WAKING)
flags |= ENQUEUE_MIGRATE;
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
wakeup = 1;
enqueue_entity(cfs_rq, se, flags);
flags = ENQUEUE_WAKEUP;
}
hrtick_update(rq);
@@ -1120,6 +1145,14 @@ static void yield_task_fair(struct rq *rq)
#ifdef CONFIG_SMP
static void task_waking_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
se->vruntime -= cfs_rq->min_vruntime;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* effective_load() calculates the load change as seen from the root_task_group
@@ -1429,6 +1462,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
}
for_each_domain(cpu, tmp) {
if (!(tmp->flags & SD_LOAD_BALANCE))
continue;
/*
* If power savings logic is enabled for a domain, see if we
* are not overloaded, if so, don't balance wider.
@@ -1975,6 +2011,8 @@ static void task_fork_fair(struct task_struct *p)
resched_task(rq->curr);
}
se->vruntime -= cfs_rq->min_vruntime;
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -2028,11 +2066,12 @@ static void set_curr_task_fair(struct rq *rq)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
static void moved_group_fair(struct task_struct *p, int on_rq)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_curr(cfs_rq);
if (!on_rq)
place_entity(cfs_rq, &p->se, 1);
}
#endif
@@ -2073,6 +2112,8 @@ static const struct sched_class fair_sched_class = {
.move_one_task = move_one_task_fair,
.rq_online = rq_online_fair,
.rq_offline = rq_offline_fair,
.task_waking = task_waking_fair,
#endif
.set_curr_task = set_curr_task_fair,
......
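Taken together, the enqueue_entity/dequeue_entity and task_waking_fair hunks above keep a migrating or sleeping entity's vruntime meaningful across runqueues: it is made relative to the old queue by subtracting that queue's min_vruntime, and re-based at enqueue time by adding the destination queue's min_vruntime. A small stand-alone model of that arithmetic (plain C, with illustrative structure and function names):

#include <stdio.h>

struct cfs_rq_model { unsigned long long min_vruntime; };
struct se_model     { unsigned long long vruntime; };

/* On dequeue / task_waking: make vruntime relative to the old queue. */
static void normalize(struct se_model *se, const struct cfs_rq_model *old_rq)
{
	se->vruntime -= old_rq->min_vruntime;
}

/* On enqueue after a migration: re-base onto the new queue. */
static void rebase(struct se_model *se, const struct cfs_rq_model *new_rq)
{
	se->vruntime += new_rq->min_vruntime;
}

int main(void)
{
	struct cfs_rq_model cpu0 = { .min_vruntime = 1000 };
	struct cfs_rq_model cpu1 = { .min_vruntime = 5000 };
	struct se_model se = { .vruntime = 1200 };	/* 200 ahead of cpu0's minimum */

	normalize(&se, &cpu0);	/* 200: relative position on the old queue       */
	rebase(&se, &cpu1);	/* 5200: same relative position on the new queue */
	printf("%llu\n", se.vruntime);
	return 0;
}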
@@ -35,7 +35,7 @@ static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
raw_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
pr_err("bad: scheduling from the idle thread!\n");
dump_stack();
raw_spin_lock_irq(&rq->lock);
}
......
@@ -1472,7 +1472,7 @@ static void post_schedule_rt(struct rq *rq)
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
@@ -1753,7 +1753,7 @@ static const struct sched_class rt_sched_class = {
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
.task_wake_up = task_wake_up_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
#endif
......