Commit 4f4eb77b authored Jun 20, 2002 by Ingo Molnar
- small UP optimisation from Mikael Pettersson and James Bottomley, modified.
Parent: b74e58ec
Showing 2 changed files, with 44 additions and 16 deletions:

  include/linux/sched.h   +28  -0
  kernel/sched.c          +16  -16
include/linux/sched.h

@@ -863,6 +863,34 @@ static inline void recalc_sigpending(void)
 	clear_thread_flag(TIF_SIGPENDING);
 }
 
+/*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+ */
+#ifdef CONFIG_SMP
+
+static inline unsigned int task_cpu(struct task_struct *p)
+{
+	return p->thread_info->cpu;
+}
+
+static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	p->thread_info->cpu = cpu;
+}
+
+#else
+
+static inline unsigned int task_cpu(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* __KERNEL__ */
 
 #endif
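The point of these wrappers is that on uniprocessor builds task_cpu() returns a compile-time constant, so CPU comparisons and the thread_info dereference vanish from the generated code. A minimal userspace sketch of that effect follows (mock types and a main() for illustration only, not part of the commit; build with and without -DCONFIG_SMP to compare):

/* Sketch: mock task_struct/thread_info so the wrapper pattern
 * compiles standalone in userspace. */
#include <stdio.h>

struct thread_info { unsigned int cpu; };
struct task_struct { struct thread_info *thread_info; };

#ifdef CONFIG_SMP
static inline unsigned int task_cpu(struct task_struct *p)
{
	return p->thread_info->cpu;	/* real per-task CPU field */
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	p->thread_info->cpu = cpu;
}
#else
static inline unsigned int task_cpu(struct task_struct *p)
{
	(void)p;
	return 0;			/* one CPU: constant-folds away */
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	(void)p;
	(void)cpu;			/* nothing to store on UP */
}
#endif

int main(void)
{
	struct thread_info ti = { 0 };
	struct task_struct t = { &ti };

	set_task_cpu(&t, 0);
	/* On a UP build this condition is 0 != 0, so the compiler can
	 * drop the whole branch (the reschedule-IPI path in resched_task
	 * disappears the same way). */
	if (task_cpu(&t) != 0)
		printf("would send a reschedule IPI\n");
	printf("task runs on cpu %u\n", task_cpu(&t));
	return 0;
}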
kernel/sched.c

@@ -148,7 +148,7 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
 
 #define cpu_rq(cpu)		(runqueues + (cpu))
 #define this_rq()		cpu_rq(smp_processor_id())
-#define task_rq(p)		cpu_rq((p)->thread_info->cpu)
+#define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
 
@@ -284,8 +284,8 @@ static inline void resched_task(task_t *p)
 	need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
 	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
 
-	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
-		smp_send_reschedule(p->thread_info->cpu);
+	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
+		smp_send_reschedule(task_cpu(p));
 	preempt_enable();
 #else
 	set_tsk_need_resched(p);
@@ -366,10 +366,10 @@ static int try_to_wake_up(task_t * p, int sync)
 	 * currently. Do not violate hard affinity.
 	 */
 	if (unlikely(sync && (rq->curr != p) &&
-		(p->thread_info->cpu != smp_processor_id()) &&
+		(task_cpu(p) != smp_processor_id()) &&
 		(p->cpus_allowed & (1UL << smp_processor_id())))) {
 
-		p->thread_info->cpu = smp_processor_id();
+		set_task_cpu(p, smp_processor_id());
 		task_rq_unlock(rq, &flags);
 		goto repeat_lock_task;
 	}
@@ -409,7 +409,7 @@ void wake_up_forked_process(task_t * p)
 		p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
 		p->prio = effective_prio(p);
 	}
-	p->thread_info->cpu = smp_processor_id();
+	set_task_cpu(p, smp_processor_id());
 	activate_task(p, rq);
 	rq_unlock(rq);
 }
@@ -663,7 +663,7 @@ static void load_balance(runqueue_t *this_rq, int idle)
 	 */
 	dequeue_task(next, array);
 	busiest->nr_running--;
-	next->thread_info->cpu = this_cpu;
+	set_task_cpu(next, this_cpu);
 	this_rq->nr_running++;
 	enqueue_task(next, this_rq->active);
 	if (next->prio < current->prio)
@@ -821,7 +821,7 @@ asmlinkage void schedule(void)
 	spin_lock_irq(&rq->lock);
 
 	/*
-	 * if entering off a kernel preemption go straight
+	 * if entering off of a kernel preemption go straight
 	 * to picking the next task.
 	 */
 	if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
@@ -906,7 +906,7 @@ asmlinkage void preempt_schedule(void)
 	schedule();
 	ti->preempt_count = 0;
 
-	/* we can miss a preemption opportunity between schedule and now */
+	/* we could miss a preemption opportunity between schedule and now */
 	barrier();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
@@ -1630,7 +1630,7 @@ static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
 
 void __init init_idle(task_t *idle, int cpu)
 {
-	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(idle->thread_info->cpu);
+	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
 	unsigned long flags;
 
 	__save_flags(flags);
@@ -1642,7 +1642,7 @@ void __init init_idle(task_t *idle, int cpu)
 	idle->array = NULL;
 	idle->prio = MAX_PRIO;
 	idle->state = TASK_RUNNING;
-	idle->thread_info->cpu = cpu;
+	set_task_cpu(idle, cpu);
 	double_rq_unlock(idle_rq, rq);
 	set_tsk_need_resched(idle);
 	__restore_flags(flags);
@@ -1751,7 +1751,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	 * Can the task run on the task's current CPU? If not then
 	 * migrate the process off to a proper CPU.
 	 */
-	if (new_mask & (1UL << p->thread_info->cpu)) {
+	if (new_mask & (1UL << task_cpu(p))) {
 		task_rq_unlock(rq, &flags);
 		goto out;
 	}
@@ -1760,7 +1760,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->array && (p != rq->curr)) {
-		p->thread_info->cpu = __ffs(p->cpus_allowed);
+		set_task_cpu(p, __ffs(p->cpus_allowed));
 		task_rq_unlock(rq, &flags);
 		goto out;
 	}
@@ -1829,18 +1829,18 @@ static int migration_thread(void * bind_cpu)
 		cpu_dest = __ffs(p->cpus_allowed);
 		rq_dest = cpu_rq(cpu_dest);
 repeat:
-		cpu_src = p->thread_info->cpu;
+		cpu_src = task_cpu(p);
 		rq_src = cpu_rq(cpu_src);
 
 		local_irq_save(flags);
 		double_rq_lock(rq_src, rq_dest);
-		if (p->thread_info->cpu != cpu_src) {
+		if (task_cpu(p) != cpu_src) {
 			double_rq_unlock(rq_src, rq_dest);
 			local_irq_restore(flags);
 			goto repeat;
 		}
 		if (rq_src == rq) {
-			p->thread_info->cpu = cpu_dest;
+			set_task_cpu(p, cpu_dest);
 			if (p->array) {
 				deactivate_task(p, rq_src);
 				activate_task(p, rq_dest);
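The migration_thread hunk above also illustrates the unlocked-read / lock / recheck / retry idiom: cpu_src is sampled without holding any runqueue lock, so after double_rq_lock() the code must confirm the task has not moved in the meantime, and retry if it has. A minimal pthreads sketch of that idiom (illustrative names, not kernel code; the fixed locking order stands in for double_rq_lock()):

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 2

static pthread_mutex_t lock[NSLOTS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static volatile int task_slot;		/* which slot the "task" lives in */

static void migrate(int dest)
{
	int src;

repeat:
	src = task_slot;		/* optimistic, unlocked read */
	if (src == dest)
		return;			/* already there, nothing to do */
	/* Take both locks in a fixed order to avoid ABBA deadlock,
	 * as double_rq_lock() does for runqueues. */
	pthread_mutex_lock(&lock[src < dest ? src : dest]);
	pthread_mutex_lock(&lock[src < dest ? dest : src]);
	if (task_slot != src) {		/* raced: task moved, try again */
		pthread_mutex_unlock(&lock[src < dest ? dest : src]);
		pthread_mutex_unlock(&lock[src < dest ? src : dest]);
		goto repeat;
	}
	task_slot = dest;		/* safe: both slots are locked */
	pthread_mutex_unlock(&lock[src < dest ? dest : src]);
	pthread_mutex_unlock(&lock[src < dest ? src : dest]);
}

int main(void)
{
	migrate(1);
	printf("task now in slot %d\n", task_slot);
	return 0;
}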