nexedi / linux · Commits

Commit a4133765, authored Aug 13, 2012 by Thomas Gleixner
Merge branch 'sched/urgent' into sched/core
Parents: 3bf671af, 8f618968
Showing 5 changed files with 70 additions and 19 deletions (+70 / -19):
kernel/sched/core.c       +21  -14
kernel/sched/fair.c        +9   -2
kernel/sched/rt.c         +13   -0
kernel/sched/sched.h       +6   -2
kernel/sched/stop_task.c  +21   -1
kernel/sched/core.c
@@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
 #endif
 
+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+        u64 temp = (__force u64) rtime;
+
+        temp *= (__force u64) utime;
+
+        if (sizeof(cputime_t) == 4)
+                temp = div_u64(temp, (__force u32) total);
+        else
+                temp = div64_u64(temp, (__force u64) total);
+
+        return (__force cputime_t) temp;
+}
+
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         cputime_t rtime, utime = p->utime, total = utime + p->stime;
@@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
          */
         rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
-        if (total) {
-                u64 temp = (__force u64) rtime;
-
-                temp *= (__force u64) utime;
-                do_div(temp, (__force u32) total);
-                utime = (__force cputime_t) temp;
-        } else
+        if (total)
+                utime = scale_utime(utime, rtime, total);
+        else
                 utime = rtime;
 
         /*
@@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         total = cputime.utime + cputime.stime;
         rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-        if (total) {
-                u64 temp = (__force u64) rtime;
-
-                temp *= (__force u64) cputime.utime;
-                do_div(temp, (__force u32) total);
-                utime = (__force cputime_t) temp;
-        } else
+        if (total)
+                utime = scale_utime(cputime.utime, rtime, total);
+        else
                 utime = rtime;
 
         sig->prev_utime = max(sig->prev_utime, utime);
@@ -7246,6 +7252,7 @@ int in_sched_functions(unsigned long addr)
 
 #ifdef CONFIG_CGROUP_SCHED
 struct task_group root_task_group;
+LIST_HEAD(task_groups);
 #endif
 
 DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
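The new scale_utime() helper above computes utime * rtime / total, choosing the divide primitive by the width of cputime_t: div_u64() takes a 32-bit divisor, which is only adequate when cputime_t itself is 32 bits, while div64_u64() handles a full 64-bit total. A minimal user-space sketch of the same scaling follows; the function name, types, and numbers are illustrative only, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's scale_utime(): scale utime by the
 * ratio rtime/total.  A plain 64-bit divide stands in for the kernel's
 * div_u64()/div64_u64() pair, which differ only in divisor width. */
static uint64_t scale_utime_sketch(uint64_t utime, uint64_t rtime, uint64_t total)
{
        uint64_t temp = rtime;

        temp *= utime;          /* keep the intermediate product in 64 bits */
        return temp / total;
}

int main(void)
{
        /* e.g. 3000 ticks of user time out of 4000 ticks total, against
         * 5000 ticks of sum_exec_runtime -> utime scales up to 3750 */
        printf("%llu\n", (unsigned long long)scale_utime_sketch(3000, 5000, 4000));
        return 0;
}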
kernel/sched/fair.c
@@ -3387,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 
 static void update_h_load(long cpu)
 {
+        struct rq *rq = cpu_rq(cpu);
+        unsigned long now = jiffies;
+
+        if (rq->h_load_throttle == now)
+                return;
+
+        rq->h_load_throttle = now;
+
         rcu_read_lock();
         walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
         rcu_read_unlock();
@@ -4293,11 +4301,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
         env.src_rq    = busiest;
         env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
+        update_h_load(env.src_cpu);
 more_balance:
         local_irq_save(flags);
         double_rq_lock(this_rq, busiest);
-        if (!env.loop)
-                update_h_load(env.src_cpu);
 
         /*
          * cur_ld_moved - load moved in current iteration
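The first hunk rate-limits update_h_load() with a per-rq jiffies stamp, so the hierarchical load is recomputed at most once per jiffy, and the second hunk hoists the call out of the section that holds both rq locks. A rough, self-contained sketch of that throttling pattern, using a hypothetical recompute() and a plain counter in place of jiffies:

#include <stdint.h>
#include <stdio.h>

static uint64_t ticks;                  /* hypothetical stand-in for jiffies */

struct rq_sketch {
        uint64_t load_stamp;            /* last tick the load was refreshed at */
        uint64_t h_load;                /* cached result of the expensive walk */
};

static void recompute(struct rq_sketch *rq)
{
        rq->h_load = 42;                /* placeholder for the costly tree walk */
}

/* Refresh at most once per tick, mirroring the h_load_throttle check. */
static void update_load_throttled(struct rq_sketch *rq)
{
        if (rq->load_stamp == ticks)
                return;                 /* already refreshed during this tick */
        rq->load_stamp = ticks;
        recompute(rq);
}

int main(void)
{
        struct rq_sketch rq = { .load_stamp = UINT64_MAX, .h_load = 0 };

        update_load_throttled(&rq);     /* recomputes */
        update_load_throttled(&rq);     /* skipped: same tick */
        ticks++;
        update_load_throttled(&rq);     /* recomputes again */
        printf("h_load = %llu\n", (unsigned long long)rq.h_load);
        return 0;
}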
kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
         const struct cpumask *span;
 
         span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+        /*
+         * FIXME: isolated CPUs should really leave the root task group,
+         * whether they are isolcpus or were isolated via cpusets, lest
+         * the timer run on a CPU which does not service all runqueues,
+         * potentially leaving other CPUs indefinitely throttled.  If
+         * isolation is really required, the user will turn the throttle
+         * off to kill the perturbations it causes anyway.  Meanwhile,
+         * this maintains functionality for boot and/or troubleshooting.
+         */
+        if (rt_b == &root_task_group.rt_bandwidth)
+                span = cpu_online_mask;
+#endif
         for_each_cpu(i, span) {
                 int enqueue = 0;
                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
kernel/sched/sched.h
@@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;
 struct cfs_rq;
 struct rt_rq;
 
-static LIST_HEAD(task_groups);
+extern struct list_head task_groups;
 
 struct cfs_bandwidth {
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -374,7 +374,11 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
         /* list of leaf cfs_rq on this cpu: */
         struct list_head leaf_cfs_rq_list;
-#endif
+#ifdef CONFIG_SMP
+        unsigned long h_load_throttle;
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
 #ifdef CONFIG_RT_GROUP_SCHED
         struct list_head leaf_rt_rq_list;
 #endif
kernel/sched/stop_task.c
@@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
         struct task_struct *stop = rq->stop;
 
-        if (stop && stop->on_rq)
+        if (stop && stop->on_rq) {
+                stop->se.exec_start = rq->clock_task;
                 return stop;
+        }
 
         return NULL;
 }
@@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
+        struct task_struct *curr = rq->curr;
+        u64 delta_exec;
+
+        delta_exec = rq->clock_task - curr->se.exec_start;
+        if (unlikely((s64)delta_exec < 0))
+                delta_exec = 0;
+
+        schedstat_set(curr->se.statistics.exec_max,
+                        max(curr->se.statistics.exec_max, delta_exec));
+
+        curr->se.sum_exec_runtime += delta_exec;
+        account_group_exec_runtime(curr, delta_exec);
+
+        curr->se.exec_start = rq->clock_task;
+        cpuacct_charge(curr, delta_exec);
 }
 
 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 
 static void set_curr_task_stop(struct rq *rq)
 {
+        struct task_struct *stop = rq->stop;
+
+        stop->se.exec_start = rq->clock_task;
 }
 
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
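The stop class now does the same start/stop timestamping as the other scheduling classes: exec_start is stamped when a stop task is picked or made current, and put_prev_task_stop() charges the elapsed clock_task delta back to the task. A small sketch of that generic accounting pattern, with made-up names and a monotonic counter in place of the rq clock:

#include <stdint.h>
#include <stdio.h>

static uint64_t clock_ns;               /* hypothetical monotonic clock */

struct entity_sketch {
        uint64_t exec_start;            /* stamp taken when the task starts running */
        uint64_t sum_exec_runtime;      /* accumulated on-CPU time */
};

/* Stamp the start of a run, as pick_next/set_curr now do for the stop task. */
static void start_running(struct entity_sketch *se)
{
        se->exec_start = clock_ns;
}

/* Charge the elapsed time when the task is switched out, clamping a
 * negative delta (e.g. after a clock adjustment) to zero. */
static void stop_running(struct entity_sketch *se)
{
        int64_t delta = (int64_t)(clock_ns - se->exec_start);

        if (delta < 0)
                delta = 0;
        se->sum_exec_runtime += (uint64_t)delta;
        se->exec_start = clock_ns;
}

int main(void)
{
        struct entity_sketch se = { 0, 0 };

        start_running(&se);
        clock_ns += 1500;               /* task ran for 1500 "ns" */
        stop_running(&se);
        printf("runtime = %llu\n", (unsigned long long)se.sum_exec_runtime);
        return 0;
}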