Commit 35e181a9
authored Apr 17, 2002 by Ingo Molnar

Merge k:t into elte.hu:/home/mingo/BK/mine/linux-2.5

parents 91813920 e5eeec2e

Showing 1 changed file with 64 additions and 29 deletions:

kernel/sched.c  +64 -29
@@ -1672,7 +1672,16 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	preempt_enable();
 }
 
+/*
+ * Treat the bits of migration_mask as lock bits.
+ * If the bit corresponding to the cpu a migration_thread is
+ * running on is set, then we have failed to claim our cpu and must
+ * yield in order to find another.
+ */
 static volatile unsigned long migration_mask;
+static atomic_t migration_threads_seeking_cpu;
+static struct completion migration_complete
+			= COMPLETION_INITIALIZER(migration_complete);
 
 static int migration_thread(void * unused)
 {
@@ -1696,26 +1705,54 @@ static int migration_thread(void * unused)
 	 * task binds itself to the current CPU.
 	 */
 
-	/* wait for all migration threads to start up. */
-	while (!migration_mask)
-		yield();
-
-	for (;;) {
-		preempt_disable();
-		if (test_and_clear_bit(smp_processor_id(), &migration_mask))
-			current->cpus_allowed = 1 << smp_processor_id();
-		if (test_thread_flag(TIF_NEED_RESCHED))
-			schedule();
-		if (!migration_mask)
-			break;
+	/*
+	 * Enter the loop with preemption disabled so that
+	 * smp_processor_id() remains valid through the check. The
+	 * interior of the wait loop re-enables preemption in an
+	 * attempt to get scheduled off the current cpu. When the
+	 * loop is exited the lock bit in migration_mask is acquired
+	 * and preemption is disabled on the way out. This way the
+	 * cpu acquired remains valid when ->cpus_allowed is set.
+	 */
+	preempt_disable();
+	while (test_and_set_bit(smp_processor_id(), &migration_mask)) {
 		preempt_enable();
+		yield();
+		preempt_disable();
 	}
+
+	current->cpus_allowed = 1 << smp_processor_id();
 	rq = this_rq();
 	rq->migration_thread = current;
+
+	/*
+	 * Now that we've bound ourselves to a cpu, post to
+	 * migration_threads_seeking_cpu and wait for everyone else.
+	 * Preemption should remain disabled and the cpu should remain
+	 * in busywait. Yielding the cpu will allow the livelock
+	 * where a timing pattern causes an idle task seeking a
+	 * migration_thread to always find the unbound migration_thread
+	 * running on the cpu's it tries to steal tasks from.
+	 */
+	atomic_dec(&migration_threads_seeking_cpu);
+	while (atomic_read(&migration_threads_seeking_cpu))
+		cpu_relax();
+
 	preempt_enable();
 
 	sprintf(current->comm, "migration_CPU%d", smp_processor_id());
 
+	/*
+	 * Everyone's found their cpu, so now wake migration_init().
+	 * Multiple wakeups are harmless; removal from the waitqueue
+	 * has locking built-in, and waking an empty queue is valid.
+	 */
+	complete(&migration_complete);
+
+	/*
+	 * Initiate the event loop.
+	 */
 	for (;;) {
 		runqueue_t *rq_src, *rq_dest;
 		struct list_head *head;
@@ -1763,33 +1800,31 @@ static int migration_thread(void * unused)
 void __init migration_init(void)
 {
-	unsigned long tmp, orig_cache_decay_ticks;
+	unsigned long orig_cache_decay_ticks;
 	int cpu;
 
-	tmp = 0;
-	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
-		if (kernel_thread(migration_thread, NULL,
-				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
-			BUG();
-		tmp |= (1UL << cpu_logical_map(cpu));
-	}
-
-	migration_mask = tmp;
+	atomic_set(&migration_threads_seeking_cpu, smp_num_cpus);
+
+	preempt_disable();
 
 	orig_cache_decay_ticks = cache_decay_ticks;
 	cache_decay_ticks = 0;
 
-	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
-		int logical = cpu_logical_map(cpu);
-
-		while (!cpu_rq(logical)->migration_thread) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(2);
-		}
-	}
-	if (migration_mask)
-		BUG();
+	for (cpu = 0; cpu < smp_num_cpus; cpu++)
+		if (kernel_thread(migration_thread, NULL,
+				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+			BUG();
+
+	/*
+	 * We cannot have missed the wakeup, for the migration_thread
+	 * bound for the cpu migration_init() is running on cannot
+	 * acquire this cpu until migration_init() has yielded it by
+	 * means of wait_for_completion().
+	 */
+	wait_for_completion(&migration_complete);
 
 	cache_decay_ticks = orig_cache_decay_ticks;
+
+	preempt_enable();
 }
 
 #endif
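
The startup handshake added by this change combines three pieces: per-cpu lock bits in migration_mask claimed with test_and_set_bit(), an atomic countdown in migration_threads_seeking_cpu that every thread busy-waits on so no thread gives up its cpu before all cpus are claimed, and a completion that wakes migration_init() once everyone is bound. Below is a minimal user-space sketch of the same pattern, assuming POSIX threads and C11 atomics; it is not kernel code, and every name in it (NWORKERS, slot_mask, workers_seeking, worker(), and so on) is an illustrative stand-in rather than anything from this commit.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_ulong slot_mask;                  /* plays the role of migration_mask */
static atomic_int workers_seeking = NWORKERS;   /* ... of migration_threads_seeking_cpu */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;  /* stands in for the completion */
static int all_bound;

static void *worker(void *arg)
{
	unsigned long slot = 0;
	(void)arg;

	/* Treat the bits of slot_mask as lock bits: keep retrying (moving
	 * to the next slot, the user-space stand-in for being rescheduled
	 * on another cpu after yield()) until we flip a bit from 0 to 1. */
	while (atomic_fetch_or(&slot_mask, 1UL << slot) & (1UL << slot)) {
		slot = (slot + 1) % NWORKERS;
		sched_yield();
	}

	/* Post that we are bound and busy-wait for everyone else, like
	 * atomic_dec()/atomic_read() on migration_threads_seeking_cpu;
	 * nobody yields until every slot is claimed. */
	atomic_fetch_sub(&workers_seeking, 1);
	while (atomic_load(&workers_seeking))
		;  /* spin */

	/* Wake the initializer; extra wakeups are harmless, as with the
	 * multiple complete() calls in the patch. */
	pthread_mutex_lock(&done_lock);
	all_bound = 1;
	pthread_cond_signal(&done_cond);
	pthread_mutex_unlock(&done_lock);

	printf("worker bound to slot %lu\n", slot);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	unsigned long i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/* wait_for_completion() analogue: sleep until some worker signals. */
	pthread_mutex_lock(&done_lock);
	while (!all_bound)
		pthread_cond_wait(&done_cond, &done_lock);
	pthread_mutex_unlock(&done_lock);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Compile with cc -pthread. As in the patch, the claim loop retries until it sets a previously clear bit, the countdown keeps every worker spinning until the last one has bound itself, and the condition variable takes the place of the completion that wait_for_completion() blocks on.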