Commit 8e197efa, authored Jan 18, 2004 by Andrew Morton, committed by Linus Torvalds on Jan 18, 2004
[PATCH] sched.c style cleanups
From: Ingo Molnar <mingo@elte.hu>

- sched.c style cleanups (no code change)
parent 2df40901
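The patch touches no logic; it only normalises formatting in kernel/sched.c. As a rough orientation before the hunks below, here is a small stand-alone C sketch (hypothetical names, not code from the patch) of the conventions the cleanup moves towards: a space before an opening brace and in "while (0)", long prototypes wrapped with the storage-class keywords on their own line, and designated initializers for static structs.

#include <stdio.h>

struct callbacks {
        int (*notify)(unsigned long action);
};

static int example_notify(unsigned long action)
{
        return (int)action;
}

/* designated initializer rather than a positional { example_notify } */
static struct callbacks example_cb = { .notify = example_notify };

static inline
int example_sum(int a, int b, int c, int d)
{
        int total = 0;

        if (a > 0) {                            /* "){" becomes ") {" */
                total += a;
        }
        do { total += b + c + d; } while (0);   /* "while (0)", not "while(0)" */
        return total;
}

int main(void)
{
        printf("%d %d\n", example_sum(1, 2, 3, 4), example_cb.notify(5));
        return 0;
}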
Showing 1 changed file with 97 additions and 79 deletions: kernel/sched.c (+97, -79)
kernel/sched.c @ 8e197efa
@@ -79,13 +79,13 @@
  */
 #define MIN_TIMESLICE ( 10 * HZ / 1000)
 #define MAX_TIMESLICE (200 * HZ / 1000)
-#define ON_RUNQUEUE_WEIGHT 30
-#define CHILD_PENALTY 95
+#define ON_RUNQUEUE_WEIGHT       30
+#define CHILD_PENALTY            95
 #define PARENT_PENALTY 100
-#define EXIT_WEIGHT 3
-#define PRIO_BONUS_RATIO 25
+#define EXIT_WEIGHT               3
+#define PRIO_BONUS_RATIO         25
 #define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
-#define INTERACTIVE_DELTA 2
+#define INTERACTIVE_DELTA         2
 #define MAX_SLEEP_AVG (AVG_TIMESLICE * MAX_BONUS)
 #define STARVATION_LIMIT (MAX_SLEEP_AVG)
 #define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG))
@@ -143,7 +143,7 @@
 #define TASK_INTERACTIVE(p) \
         ((p)->prio <= (p)->static_prio - DELTA(p))
-#define JUST_INTERACTIVE_SLEEP(p) \
+#define INTERACTIVE_SLEEP(p) \
         (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
                 (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
@@ -168,7 +168,8 @@
  */
 #define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
-        ((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1)))
+        ((MAX_TIMESLICE - MIN_TIMESLICE) * \
+                (MAX_PRIO-1 - (p)->static_prio) / (MAX_USER_PRIO-1)))

 static inline unsigned int task_timeslice(task_t *p)
 {
@@ -199,7 +200,7 @@ struct prio_array {
 struct runqueue {
         spinlock_t lock;
         unsigned long nr_running, nr_switches, expired_timestamp,
                         nr_uninterruptible, timestamp_last_tick;
         task_t *curr, *idle;
         struct mm_struct *prev_mm;
         prio_array_t *active, *expired, arrays[2];
@@ -225,7 +226,7 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
  * Default context-switch locking:
  */
 #ifndef prepare_arch_switch
-# define prepare_arch_switch(rq, next) do { } while(0)
+# define prepare_arch_switch(rq, next) do { } while (0)
 # define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
 # define task_running(rq, p) ((rq)->curr == (p))
 #endif
@@ -269,9 +270,9 @@ __init void node_nr_running_init(void)
 #else /* !CONFIG_NUMA */
 # define nr_running_init(rq)   do { } while (0)
 # define nr_running_inc(rq)    do { (rq)->nr_running++; } while (0)
 # define nr_running_dec(rq)    do { (rq)->nr_running--; } while (0)
 #endif /* CONFIG_NUMA */
@@ -396,7 +397,7 @@ static void recalc_task_prio(task_t *p, unsigned long long now)
                  * other processes.
                  */
                 if (p->mm && p->activated != -1 &&
-                        sleep_time > JUST_INTERACTIVE_SLEEP(p)) {
+                        sleep_time > INTERACTIVE_SLEEP(p)) {
                                 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
                                                 AVG_TIMESLICE);
                                 if (!HIGH_CREDIT(p))
@@ -413,37 +414,35 @@ static void recalc_task_prio(task_t *p, unsigned long long now)
                  * one timeslice worth of sleep avg bonus.
                  */
                 if (LOW_CREDIT(p) &&
                     sleep_time > JIFFIES_TO_NS(task_timeslice(p)))
-                        sleep_time =
-                                JIFFIES_TO_NS(task_timeslice(p));
+                        sleep_time = JIFFIES_TO_NS(task_timeslice(p));

                 /*
                  * Non high_credit tasks waking from uninterruptible
                  * sleep are limited in their sleep_avg rise as they
                  * are likely to be cpu hogs waiting on I/O
                  */
-                if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm){
-                        if (p->sleep_avg >= JUST_INTERACTIVE_SLEEP(p))
+                if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm) {
+                        if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
                                 sleep_time = 0;
                         else if (p->sleep_avg + sleep_time >=
-                                        JUST_INTERACTIVE_SLEEP(p)){
-                                p->sleep_avg =
-                                        JUST_INTERACTIVE_SLEEP(p);
+                                        INTERACTIVE_SLEEP(p)) {
+                                p->sleep_avg = INTERACTIVE_SLEEP(p);
                                 sleep_time = 0;
                         }
                 }

                 /*
                  * This code gives a bonus to interactive tasks.
                  *
                  * The boost works by updating the 'average sleep time'
-                 * value here, based on ->timestamp. The more time a task
-                 * spends sleeping, the higher the average gets - and the
-                 * higher the priority boost gets as well.
+                 * value here, based on ->timestamp. The more time a
+                 * task spends sleeping, the higher the average gets -
+                 * and the higher the priority boost gets as well.
                  */
                 p->sleep_avg += sleep_time;
-                if (p->sleep_avg > NS_MAX_SLEEP_AVG){
+                if (p->sleep_avg > NS_MAX_SLEEP_AVG) {
                         p->sleep_avg = NS_MAX_SLEEP_AVG;
                         if (!HIGH_CREDIT(p))
                                 p->interactive_credit++;
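The comment in the hunk above describes the interactivity boost: time spent sleeping is added to ->sleep_avg, the sum is clamped at NS_MAX_SLEEP_AVG, and tasks that hit the ceiling earn interactive credit. Below is a minimal user-space sketch of that accumulate-and-clamp step; the constant, struct and function names are illustrative, and the kernel additionally gates the credit on HIGH_CREDIT(), which is omitted here.

#include <stdio.h>

#define SKETCH_MAX_SLEEP_AVG 1000000000ULL      /* 1 s in ns, for illustration */

struct sketch_task {
        unsigned long long sleep_avg;
        int interactive_credit;
};

static void sketch_recalc(struct sketch_task *p, unsigned long long sleep_time)
{
        p->sleep_avg += sleep_time;
        if (p->sleep_avg > SKETCH_MAX_SLEEP_AVG) {
                p->sleep_avg = SKETCH_MAX_SLEEP_AVG;
                p->interactive_credit++;        /* rewarded for long sleeps */
        }
}

int main(void)
{
        struct sketch_task t = { 0, 0 };

        sketch_recalc(&t, 400000000ULL);        /* 0.4 s of sleep */
        sketch_recalc(&t, 900000000ULL);        /* pushes past the cap */
        printf("sleep_avg=%llu credit=%d\n", t.sleep_avg, t.interactive_credit);
        return 0;
}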
@@ -470,7 +469,7 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
         * This checks to make sure it's not an uninterruptible task
         * that is now waking up.
         */
-        if (!p->activated){
+        if (!p->activated) {
                 /*
                  * Tasks which were woken up by interrupts (ie. hw events)
                  * are most likely of interactive nature. So we give them
@@ -480,13 +479,14 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
...
@@ -480,13 +479,14 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
*/
*/
if
(
in_interrupt
())
if
(
in_interrupt
())
p
->
activated
=
2
;
p
->
activated
=
2
;
else
else
{
/*
/*
* Normal first-time wakeups get a credit too for on-runqueue
* Normal first-time wakeups get a credit too for
*
time, but it will be weighted down:
* on-runqueue
time, but it will be weighted down:
*/
*/
p
->
activated
=
1
;
p
->
activated
=
1
;
}
}
}
p
->
timestamp
=
now
;
p
->
timestamp
=
now
;
__activate_task
(
p
,
rq
);
__activate_task
(
p
,
rq
);
@@ -632,13 +632,14 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
         */
        if (unlikely(sync && !task_running(rq, p) &&
                     (task_cpu(p) != smp_processor_id()) &&
                     cpu_isset(smp_processor_id(), p->cpus_allowed))) {
                set_task_cpu(p, smp_processor_id());
                task_rq_unlock(rq, &flags);
                goto repeat_lock_task;
        }
-       if (old_state == TASK_UNINTERRUPTIBLE){
+       if (old_state == TASK_UNINTERRUPTIBLE) {
                rq->nr_uninterruptible--;
                /*
                 * Tasks on involuntary sleep don't earn
@@ -663,7 +664,8 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 }

 int wake_up_process(task_t * p)
 {
-       return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+       return try_to_wake_up(p, TASK_STOPPED |
+                       TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
 }

 EXPORT_SYMBOL(wake_up_process);
@@ -704,7 +706,7 @@ void sched_fork(task_t *p)
         * resulting in more scheduling fairness.
         */
        local_irq_disable();
        p->time_slice = (current->time_slice + 1) >> 1;
        /*
         * The remainder of the first timeslice might be recovered by
         * the parent if the child exits early enough.
@@ -854,7 +856,8 @@ asmlinkage void schedule_tail(task_t *prev)
  * context_switch - switch to the new MM and the new
  * thread's register state.
  */
-static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
+static inline
+task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
 {
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;
@@ -1002,10 +1005,10 @@ static int sched_best_cpu(struct task_struct *p)
        minload = 10000000;
        for_each_node_with_cpus(i) {
                /*
                 * Node load is always divided by nr_cpus_node to normalise
                 * load values in case cpu count differs from node to node.
                 * We first multiply node_nr_running by 10 to get a little
                 * better resolution.
                 */
                load = 10 * atomic_read(&node_nr_running[i]) / nr_cpus_node(i);
                if (load < minload) {
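The comment in the hunk above explains the normalisation in sched_best_cpu(): each node's running-task count is scaled by 10 and divided by its CPU count so that nodes with different CPU counts become comparable. A small worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        int node_nr_running[2] = { 6, 4 };      /* tasks per node */
        int nr_cpus_node[2]    = { 4, 2 };      /* CPUs per node  */
        int i;

        for (i = 0; i < 2; i++) {
                int load = 10 * node_nr_running[i] / nr_cpus_node[i];
                /* node 0: 10*6/4 = 15, node 1: 10*4/2 = 20 -> node 1 is busier */
                printf("node %d: load %d\n", i, load);
        }
        return 0;
}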
@@ -1044,7 +1047,7 @@ void sched_balance_exec(void)
  * load_{t} = load_{t-1}/2 + nr_node_running_{t}
  * This way sudden load peaks are flattened out a bit.
  * Node load is divided by nr_cpus_node() in order to compare nodes
  * of different cpu count but also [first] multiplied by 10 to
  * provide better resolution.
  */
 static int find_busiest_node(int this_node)
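The comment above gives the recurrence load_{t} = load_{t-1}/2 + nr_node_running_{t} used by find_busiest_node() to damp sudden load peaks. A short sketch with illustrative numbers shows how a one-tick spike decays instead of dominating the balancing decision:

#include <stdio.h>

int main(void)
{
        int load = 0, step;
        int nr_node_running[6] = { 40, 0, 0, 0, 0, 0 };  /* spike, then idle */

        for (step = 0; step < 6; step++) {
                load = load / 2 + nr_node_running[step];
                printf("step %d: load %d\n", step, load);  /* 40, 20, 10, 5, 2, 1 */
        }
        return 0;
}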
@@ -1082,8 +1085,10 @@ static int find_busiest_node(int this_node)
  * this_rq is locked already. Recalculate nr_running if we have to
  * drop the runqueue lock.
  */
-static inline unsigned int double_lock_balance(runqueue_t *this_rq,
-        runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running)
+static inline
+unsigned int double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest,
+                                 int this_cpu, int idle,
+                                 unsigned int nr_running)
 {
        if (unlikely(!spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
@@ -1091,7 +1096,8 @@ static inline unsigned int double_lock_balance(runqueue_t *this_rq,
                        spin_lock(&busiest->lock);
                        spin_lock(&this_rq->lock);
                        /* Need to recalculate nr_running */
-                       if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
+                       if (idle || (this_rq->nr_running >
+                                       this_rq->prev_cpu_load[this_cpu]))
                                nr_running = this_rq->nr_running;
                        else
                                nr_running = this_rq->prev_cpu_load[this_cpu];
@@ -1104,7 +1110,9 @@ static inline unsigned int double_lock_balance(runqueue_t *this_rq,
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
  */
-static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, cpumask_t cpumask)
+static inline
+runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle,
+                               int *imbalance, cpumask_t cpumask)
 {
        int nr_running, load, max_load, i;
        runqueue_t *busiest, *rq_src;
@@ -1167,7 +1175,8 @@ static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu,
                goto out;
        }

-       nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running);
+       nr_running = double_lock_balance(this_rq, busiest, this_cpu,
+                                        idle, nr_running);
        /*
         * Make sure nothing changed since we checked the
         * runqueue length.
@@ -1184,14 +1193,17 @@ static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu,
...
@@ -1184,14 +1193,17 @@ static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu,
* pull_task - move a task from a remote runqueue to the local runqueue.
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
* Both runqueues must be locked.
*/
*/
static
inline
void
pull_task
(
runqueue_t
*
src_rq
,
prio_array_t
*
src_array
,
task_t
*
p
,
runqueue_t
*
this_rq
,
int
this_cpu
)
static
inline
void
pull_task
(
runqueue_t
*
src_rq
,
prio_array_t
*
src_array
,
task_t
*
p
,
runqueue_t
*
this_rq
,
int
this_cpu
)
{
{
dequeue_task
(
p
,
src_array
);
dequeue_task
(
p
,
src_array
);
nr_running_dec
(
src_rq
);
nr_running_dec
(
src_rq
);
set_task_cpu
(
p
,
this_cpu
);
set_task_cpu
(
p
,
this_cpu
);
nr_running_inc
(
this_rq
);
nr_running_inc
(
this_rq
);
enqueue_task
(
p
,
this_rq
->
active
);
enqueue_task
(
p
,
this_rq
->
active
);
p
->
timestamp
=
sched_clock
()
-
(
src_rq
->
timestamp_last_tick
-
p
->
timestamp
);
p
->
timestamp
=
sched_clock
()
-
(
src_rq
->
timestamp_last_tick
-
p
->
timestamp
);
/*
/*
* Note that idle threads have a prio of MAX_PRIO, for this test
* Note that idle threads have a prio of MAX_PRIO, for this test
* to be always true for them.
* to be always true for them.
...
@@ -1203,8 +1215,8 @@ static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t
...
@@ -1203,8 +1215,8 @@ static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t
/*
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
*/
static
inline
int
static
inline
can_migrate_task
(
task_t
*
tsk
,
runqueue_t
*
rq
,
int
this_cpu
,
int
idle
)
int
can_migrate_task
(
task_t
*
tsk
,
runqueue_t
*
rq
,
int
this_cpu
,
int
idle
)
{
{
unsigned
long
delta
=
rq
->
timestamp_last_tick
-
tsk
->
timestamp
;
unsigned
long
delta
=
rq
->
timestamp_last_tick
-
tsk
->
timestamp
;
...
@@ -1239,7 +1251,8 @@ static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask)
...
@@ -1239,7 +1251,8 @@ static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask)
struct
list_head
*
head
,
*
curr
;
struct
list_head
*
head
,
*
curr
;
task_t
*
tmp
;
task_t
*
tmp
;
busiest
=
find_busiest_queue
(
this_rq
,
this_cpu
,
idle
,
&
imbalance
,
cpumask
);
busiest
=
find_busiest_queue
(
this_rq
,
this_cpu
,
idle
,
&
imbalance
,
cpumask
);
if
(
!
busiest
)
if
(
!
busiest
)
goto
out
;
goto
out
;
...
@@ -1381,7 +1394,7 @@ static inline void rebalance_tick(runqueue_t *this_rq, int idle)
...
@@ -1381,7 +1394,7 @@ static inline void rebalance_tick(runqueue_t *this_rq, int idle)
}
}
#endif
#endif
DEFINE_PER_CPU
(
struct
kernel_stat
,
kstat
)
=
{
{
0
}
}
;
DEFINE_PER_CPU
(
struct
kernel_stat
,
kstat
);
EXPORT_PER_CPU_SYMBOL
(
kstat
);
EXPORT_PER_CPU_SYMBOL
(
kstat
);
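The hunk above drops the explicit "= { { 0 } }" initializer from the per-CPU kstat definition. That is safe because objects with static storage duration are zero-initialised by the C language anyway; a small stand-alone illustration follows, with ordinary statics and a made-up struct standing in for the per-CPU data.

#include <stdio.h>

struct kernel_stat_sketch {
        unsigned int irqs[8];
        unsigned long long user, system, idle;
};

static struct kernel_stat_sketch with_init = { { 0 } };  /* explicit zeroing */
static struct kernel_stat_sketch without_init;           /* implicit, same result */

int main(void)
{
        printf("%u %llu / %u %llu\n",
               with_init.irqs[0], with_init.idle,
               without_init.irqs[0], without_init.idle);  /* all zeros */
        return 0;
}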
@@ -1399,7 +1412,7 @@ EXPORT_PER_CPU_SYMBOL(kstat);
        ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
                (jiffies - (rq)->expired_timestamp >= \
                        STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
                        ((rq)->curr->static_prio > (rq)->best_expired_prio))

 /*
  * This function gets called by the timer code, with HZ frequency.
@@ -1630,7 +1643,7 @@ asmlinkage void schedule(void)
...
@@ -1630,7 +1643,7 @@ asmlinkage void schedule(void)
RCU_qsctr
(
task_cpu
(
prev
))
++
;
RCU_qsctr
(
task_cpu
(
prev
))
++
;
prev
->
sleep_avg
-=
run_time
;
prev
->
sleep_avg
-=
run_time
;
if
((
long
)
prev
->
sleep_avg
<=
0
){
if
((
long
)
prev
->
sleep_avg
<=
0
)
{
prev
->
sleep_avg
=
0
;
prev
->
sleep_avg
=
0
;
if
(
!
(
HIGH_CREDIT
(
prev
)
||
LOW_CREDIT
(
prev
)))
if
(
!
(
HIGH_CREDIT
(
prev
)
||
LOW_CREDIT
(
prev
)))
prev
->
interactive_credit
--
;
prev
->
interactive_credit
--
;
@@ -1707,7 +1720,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+                             int nr_exclusive, int sync)
 {
        struct list_head *tmp, *next;
@@ -1784,7 +1798,8 @@ void complete(struct completion *x)
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
-       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+                        1, 0);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
@@ -1796,7 +1811,8 @@ void complete_all(struct completion *x)
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done += UINT_MAX/2;
-       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+                        0, 0);
        spin_unlock_irqrestore(&x->wait.lock, flags);
 }
@@ -1823,9 +1839,9 @@ void wait_for_completion(struct completion *x)
 EXPORT_SYMBOL(wait_for_completion);

 #define SLEEP_ON_VAR                           \
        unsigned long flags;                    \
        wait_queue_t wait;                      \
        init_waitqueue_entry(&wait, current);

 #define SLEEP_ON_HEAD \
@@ -1833,9 +1849,9 @@ EXPORT_SYMBOL(wait_for_completion);
        __add_wait_queue(q, &wait); \
        spin_unlock(&q->lock);

 #define SLEEP_ON_TAIL                          \
        spin_lock_irq(&q->lock);                \
        __remove_wait_queue(q, &wait);          \
        spin_unlock_irqrestore(&q->lock, flags);

 void interruptible_sleep_on(wait_queue_head_t *q)
@@ -1960,9 +1976,9 @@ asmlinkage long sys_nice(int increment)
        long nice;

        /*
         * Setpriority might change our priority at the same moment.
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */
        if (increment < 0) {
                if (!capable(CAP_SYS_NICE))
@@ -2142,7 +2158,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
...
@@ -2142,7 +2158,7 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @param: structure containing the new RT priority.
* @param: structure containing the new RT priority.
*/
*/
asmlinkage
long
sys_sched_setscheduler
(
pid_t
pid
,
int
policy
,
asmlinkage
long
sys_sched_setscheduler
(
pid_t
pid
,
int
policy
,
struct
sched_param
__user
*
param
)
struct
sched_param
__user
*
param
)
{
{
return
setscheduler
(
pid
,
policy
,
param
);
return
setscheduler
(
pid
,
policy
,
param
);
}
}
@@ -2449,7 +2465,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+asmlinkage
+long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
        int retval = -EINVAL;
        struct timespec t;
@@ -2695,7 +2712,7 @@ static void move_task_away(struct task_struct *p, int dest_cpu)
        }
        p->timestamp = rq_dest->timestamp_last_tick;

 out:
        double_rq_unlock(this_rq(), rq_dest);
        local_irq_restore(flags);
 }
@@ -2764,11 +2781,10 @@ static int migration_thread(void * data)
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
  */
-static int migration_call(struct notifier_block *nfb,
-                         unsigned long action,
-                         void *hcpu)
+static int migration_call(struct notifier_block *nfb, unsigned long action,
+                         void *hcpu)
 {
        long cpu = (long)hcpu;
        migration_startup_t startup;

        switch (action) {
@@ -2797,7 +2813,8 @@ static int migration_call(struct notifier_block *nfb,
                return NOTIFY_OK;
 }

-static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };
+static struct notifier_block migration_notifier
+                       = { .notifier_call = &migration_call };

 __init int migration_init(void)
 {
@@ -2833,7 +2850,7 @@ static void kstat_init_cpu(int cpu)
...
@@ -2833,7 +2850,7 @@ static void kstat_init_cpu(int cpu)
}
}
static
int
__devinit
kstat_cpu_notify
(
struct
notifier_block
*
self
,
static
int
__devinit
kstat_cpu_notify
(
struct
notifier_block
*
self
,
unsigned
long
action
,
void
*
hcpu
)
unsigned
long
action
,
void
*
hcpu
)
{
{
int
cpu
=
(
unsigned
long
)
hcpu
;
int
cpu
=
(
unsigned
long
)
hcpu
;
switch
(
action
)
{
switch
(
action
)
{
@@ -2847,13 +2864,14 @@ static int __devinit kstat_cpu_notify(struct notifier_block *self,
 }

 static struct notifier_block __devinitdata kstat_nb = {
        .notifier_call  = kstat_cpu_notify,
        .next           = NULL,
 };

-__init static void init_kstat(void) {
+__init static void init_kstat(void)
+{
        kstat_cpu_notify(&kstat_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        register_cpu_notifier(&kstat_nb);
 }
@@ -2919,7 +2937,7 @@ void __might_sleep(char *file, int line)
        printk(KERN_ERR "Debug: sleeping function called from invalid"
                        " context at %s:%d\n", file, line);
        printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
        dump_stack();
 }
 #endif