Commit 9a5efe34 authored by Linus Torvalds, committed by Linus Torvalds

Cset exclude: mingo@elte.hu|ChangeSet|20020416054244|19697

parent 5ec9321d
@@ -1672,16 +1672,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	preempt_enable();
 }
 
-/*
- * Treat the bits of migration_mask as lock bits.
- * If the bit corresponding to the cpu a migration_thread is
- * running on is set then we have failed to claim our cpu and must
- * yield in order to find another.
- */
 static volatile unsigned long migration_mask;
-static atomic_t migration_threads_seeking_cpu;
-static struct completion migration_complete
-	= COMPLETION_INITIALIZER(migration_complete);
 
 static int migration_thread(void * unused)
 {
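
Both the excluded changeset's startup code and the restored code below turn on the kernel's atomic bitops. As a rough user-space illustration only, with GCC __atomic builtins standing in for the arch-specific kernel implementations (the *_u helper names are hypothetical, not kernel APIs), test_and_set_bit() and test_and_clear_bit() behave roughly like this:

#include <stdio.h>

/* Rough user-space analogues of the kernel bitops used in this diff;
 * GCC __atomic builtins stand in for the arch-specific asm. */
static int test_and_set_bit_u(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << nr;

	/* Atomically OR the bit in and report whether it was already set. */
	return (__atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

static int test_and_clear_bit_u(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << nr;

	/* Atomically clear the bit and report whether it was set before. */
	return (__atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

int main(void)
{
	volatile unsigned long word = 0;

	printf("%d\n", test_and_set_bit_u(2, &word));   /* 0: bit was clear, now set   */
	printf("%d\n", test_and_set_bit_u(2, &word));   /* 1: bit was already set      */
	printf("%d\n", test_and_clear_bit_u(2, &word)); /* 1: bit was set, now cleared */
	printf("%d\n", test_and_clear_bit_u(2, &word)); /* 0: bit was already clear    */
	return 0;
}

The part both startup protocols key off is the return value: whether the bit was already set (or already clear) before the atomic update.
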
@@ -1705,54 +1696,26 @@ static int migration_thread(void * unused)
 	 * task binds itself to the current CPU.
 	 */
 
-	preempt_disable();
-
-	/*
-	 * Enter the loop with preemption disabled so that
-	 * smp_processor_id() remains valid through the check. The
-	 * interior of the wait loop re-enables preemption in an
-	 * attempt to get scheduled off the current cpu. When the
-	 * loop is exited the lock bit in migration_mask is acquired
-	 * and preemption is disabled on the way out. This way the
-	 * cpu acquired remains valid when ->cpus_allowed is set.
-	 */
-	while (test_and_set_bit(smp_processor_id(), &migration_mask)) {
-		preempt_enable();
+	/* wait for all migration threads to start up. */
+	while (!migration_mask)
 		yield();
+
+	for (;;) {
 		preempt_disable();
+		if (test_and_clear_bit(smp_processor_id(), &migration_mask))
+			current->cpus_allowed = 1 << smp_processor_id();
+		if (test_thread_flag(TIF_NEED_RESCHED))
+			schedule();
+		if (!migration_mask)
+			break;
+		preempt_enable();
 	}
-
-	current->cpus_allowed = 1 << smp_processor_id();
 	rq = this_rq();
 	rq->migration_thread = current;
-
-	/*
-	 * Now that we've bound ourselves to a cpu, post to
-	 * migration_threads_seeking_cpu and wait for everyone else.
-	 * Preemption should remain disabled and the cpu should remain
-	 * in busywait. Yielding the cpu will allow the livelock
-	 * where a timing pattern causes an idle task seeking a
-	 * migration_thread to always find the unbound migration_thread
-	 * running on the cpu's it tries to steal tasks from.
-	 */
-	atomic_dec(&migration_threads_seeking_cpu);
-	while (atomic_read(&migration_threads_seeking_cpu))
-		cpu_relax();
 	preempt_enable();
 
 	sprintf(current->comm, "migration_CPU%d", smp_processor_id());
 
-	/*
-	 * Everyone's found their cpu, so now wake migration_init().
-	 * Multiple wakeups are harmless; removal from the waitqueue
-	 * has locking built-in, and waking an empty queue is valid.
-	 */
-	complete(&migration_complete);
-
-	/*
-	 * Initiate the event loop.
-	 */
 	for (;;) {
 		runqueue_t *rq_src, *rq_dest;
 		struct list_head *head;
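
For context on the hunk above: the removed lines implemented the excluded changeset's scheme, in which migration_mask starts at zero and each migration_thread test-and-sets the bit of whatever cpu it happens to be running on, yielding and retrying if that bit is already taken. What follows is only a loose user-space model of that claim-a-slot idea; pthreads, a fixed NSLOTS, round-robin slot probing, and the *_u helper are stand-ins for cpus, smp_processor_id(), the scheduler, and the kernel bitop, and none of it is kernel code:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NSLOTS 4

static volatile unsigned long slot_mask;   /* bit set => slot already claimed */
static int owner[NSLOTS];                  /* which worker ended up owning each slot */

/* User-space stand-in for the kernel's test_and_set_bit(). */
static int test_and_set_bit_u(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	return (__atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

static void *worker(void *arg)
{
	int id = (int)(long)arg;
	int slot = 0;   /* everyone starts by probing slot 0, forcing contention */

	/*
	 * Keep probing slots until a test-and-set succeeds: a failed attempt
	 * means someone else already holds that "lock bit", so yield and try
	 * the next one.  In the excluded changeset the candidate slot was
	 * whatever cpu the thread happened to be running on at that moment.
	 */
	while (test_and_set_bit_u(slot, &slot_mask)) {
		sched_yield();
		slot = (slot + 1) % NSLOTS;
	}
	owner[slot] = id;   /* "bind" to the slot we claimed */
	return NULL;
}

int main(void)
{
	pthread_t tid[NSLOTS];
	int i;

	for (i = 0; i < NSLOTS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)(long)i);
	for (i = 0; i < NSLOTS; i++)
		pthread_join(tid[i], NULL);
	for (i = 0; i < NSLOTS; i++)
		printf("slot %d claimed by worker %d\n", i, owner[i]);
	return 0;
}

The restored code goes the other way around: migration_init() pre-fills the mask and each thread clears its own bit, as sketched after the final hunk below. The model builds with something like: cc -pthread claim.c
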
@@ -1803,31 +1766,33 @@ static int migration_thread(void * unused)
 void __init migration_init(void)
 {
-	unsigned long orig_cache_decay_ticks;
+	unsigned long tmp, orig_cache_decay_ticks;
 	int cpu;
 
-	atomic_set(&migration_threads_seeking_cpu, smp_num_cpus);
-
-	preempt_disable();
+	tmp = 0;
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		if (kernel_thread(migration_thread, NULL,
+				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+			BUG();
+		tmp |= (1UL << cpu_logical_map(cpu));
+	}
+
+	migration_mask = tmp;
 	orig_cache_decay_ticks = cache_decay_ticks;
 	cache_decay_ticks = 0;
-	for (cpu = 0; cpu < smp_num_cpus; cpu++)
-		if (kernel_thread(migration_thread, NULL,
-				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
-			BUG();
-
-	/*
-	 * We cannot have missed the wakeup for the migration_thread
-	 * bound for the cpu migration_init() is running on cannot
-	 * acquire this cpu until migration_init() has yielded it by
-	 * means of wait_for_completion().
-	 */
-	wait_for_completion(&migration_complete);
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		int logical = cpu_logical_map(cpu);
+
+		while (!cpu_rq(logical)->migration_thread) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(2);
+		}
+	}
+	if (migration_mask)
+		BUG();
 	cache_decay_ticks = orig_cache_decay_ticks;
-	preempt_enable();
 }
 #endif
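
Taken together, the restored hunks form a publish-and-claim rendezvous: migration_init() spawns the threads and publishes a mask with one bit per logical cpu; each migration_thread waits for the mask to appear, clears the bit of the cpu it is running on while narrowing cpus_allowed, and registers itself in its runqueue; migration_init() then polls each runqueue's migration_thread pointer and finally checks that the mask drained to zero. Below is a condensed user-space model of that rendezvous under obvious simplifications (worker ids stand in for cpus, a flag array for rq->migration_thread, plain volatile flags instead of proper kernel synchronization, and every identifier here is invented rather than a kernel API):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static volatile unsigned long rendezvous_mask;   /* published by main(), like migration_mask */
static volatile int registered[NWORKERS];        /* like rq->migration_thread being set      */

/* User-space stand-in for the kernel's test_and_clear_bit(). */
static int test_and_clear_bit_u(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	return (__atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

static void *worker(void *arg)
{
	int id = (int)(long)arg;

	/* "wait for all migration threads to start up": spin until the
	 * spawner has published the mask. */
	while (!rendezvous_mask)
		sched_yield();

	/* Claim our own bit; in the kernel this is the point where the
	 * thread narrows ->cpus_allowed to the cpu it is running on. */
	if (!test_and_clear_bit_u(id, &rendezvous_mask))
		fprintf(stderr, "worker %d: bit already gone?\n", id);

	registered[id] = 1;   /* announce ourselves, like rq->migration_thread = current */
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	unsigned long tmp = 0;
	int i;

	for (i = 0; i < NWORKERS; i++) {
		pthread_create(&tid[i], NULL, worker, (void *)(long)i);
		tmp |= 1UL << i;        /* cf. tmp |= (1UL << cpu_logical_map(cpu)) */
	}
	rendezvous_mask = tmp;          /* publish, cf. migration_mask = tmp */

	/* Poll each registration slot with a short sleep,
	 * cf. the schedule_timeout(2) loop in migration_init(). */
	for (i = 0; i < NWORKERS; i++)
		while (!registered[i])
			usleep(1000);

	if (rendezvous_mask)            /* every bit should have been claimed by now */
		fprintf(stderr, "BUG: mask not fully drained\n");

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	printf("all %d workers registered\n", NWORKERS);
	return 0;
}

The point of the final mask check, like the if (migration_mask) BUG() in the restored migration_init(), is that a leftover bit means some thread never claimed its cpu.
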