Commit a4ec24b4 authored by Dmitry Adamushko, committed by Ingo Molnar

sched: tidy up SCHED_RR

- make timeslices of SCHED_RR tasks constant, not dependent on the
  task's static_prio [1];
- remove obsolete code (timeslice-related bits);
- make sched_rr_get_interval() return something more meaningful [2]
  for SCHED_OTHER tasks.

[1] according to the following link, priority-dependent SCHED_RR
timeslices are not compliant with SUSv3 (though I'm not sure which
standard is the authoritative reference for us :-)
http://lkml.org/lkml/2007/3/7/656

[2] the interval is dynamic and can be described as follows: "should
the task be one of the runnable tasks at this particular moment, it
would expect to run for this interval of time before being rescheduled
by the scheduler tick"
(i.e. the value is more precise when the task is actually runnable at
the moment of the call)

yeah, this requires task_rq_lock()/unlock(), but this is not a hot
path.
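
The rr_interval test program used for the results below is not part of
this patch; its name and the "time_slice: <sec> : <nsec>" output format
are assumptions inferred from the output shown. A minimal sketch that
would produce such output:

#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 queries the interval of the calling process */
	if (sched_rr_get_interval(0, &ts)) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("time_slice: %ld : %ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}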

results:

(SCHED_FIFO)

dimm@earth:~/storage/prog$ sudo chrt -f 10 ./rr_interval 
time_slice: 0 : 0

(SCHED_RR)

dimm@earth:~/storage/prog$ sudo chrt 10 ./rr_interval 
time_slice: 0 : 99984800

(SCHED_NORMAL)

dimm@earth:~/storage/prog$ ./rr_interval 
time_slice: 0 : 19996960

(SCHED_NORMAL + a cpu_hog of similar 'weight' on the same CPU, so the
result should be half of the previous one)

dimm@earth:~/storage/prog$ taskset 1 ./rr_interval 
time_slice: 0 : 9998480
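
(sanity check: 19996960 / 2 = 9998480, so with a second runnable task
of equal weight on the same CPU the reported SCHED_NORMAL slice is
indeed halved)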
Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a9957449
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -96,7 +96,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * Some helpers for converting nanosecond timing to jiffy resolution
  */
-#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (1000000000 / HZ))
 #define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
 
 #define NICE_0_LOAD		SCHED_LOAD_SCALE
@@ -105,11 +105,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
  * Timeslices get refilled after they expire.
  */
-#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
 #define DEF_TIMESLICE		(100 * HZ / 1000)
 
 #ifdef CONFIG_SMP
@@ -133,24 +131,6 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 }
 #endif
 
-#define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-
-/*
- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- */
-static unsigned int static_prio_timeslice(int static_prio)
-{
-	if (static_prio == NICE_TO_PRIO(19))
-		return 1;
-
-	if (static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
-	else
-		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
-
 static inline int rt_policy(int policy)
 {
 	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
@@ -4746,6 +4726,7 @@ asmlinkage
 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
 	struct task_struct *p;
+	unsigned int time_slice;
 	int retval = -EINVAL;
 	struct timespec t;
 
@@ -4762,9 +4743,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	if (retval)
 		goto out_unlock;
 
-	jiffies_to_timespec(p->policy == SCHED_FIFO ?
-				0 : static_prio_timeslice(p->static_prio), &t);
+	if (p->policy == SCHED_FIFO)
+		time_slice = 0;
+	else if (p->policy == SCHED_RR)
+		time_slice = DEF_TIMESLICE;
+	else {
+		struct sched_entity *se = &p->se;
+		unsigned long flags;
+		struct rq *rq;
+
+		rq = task_rq_lock(p, &flags);
+		time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+		task_rq_unlock(rq, &flags);
+	}
 	read_unlock(&tasklist_lock);
+	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
 out_nounlock:
 	return retval;
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -206,7 +206,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	if (--p->time_slice)
 		return;
 
-	p->time_slice = static_prio_timeslice(p->static_prio);
+	p->time_slice = DEF_TIMESLICE;
 
 	/*
 	 * Requeue to the end of queue if we are not the only element