Commit 7e54bc75, authored Feb 11, 2002 by Ingo Molnar
    merge to the -K3 scheduler.

Parent: 14d39718
Showing 14 changed files with 249 additions and 210 deletions.
arch/i386/kernel/entry.S        +2    -0
fs/binfmt_elf.c                 +1    -1
fs/proc/array.c                 +2    -6
include/asm-i386/bitops.h       +1    -1
include/asm-i386/mmu_context.h  +4    -10
include/linux/init_task.h       +2    -1
include/linux/sched.h           +8    -63
kernel/exit.c                   +3    -24
kernel/fork.c                   +1    -4
kernel/ksyms.c                  +2    -0
kernel/sched.c                  +218  -85
kernel/sys.c                    +2    -2
kernel/timer.c                  +1    -11
mm/oom_kill.c                   +2    -2
arch/i386/kernel/entry.S (+2 -0)

@@ -190,9 +190,11 @@ ENTRY(lcall27)
 ENTRY(ret_from_fork)
+#if CONFIG_SMP
	pushl %ebx
	call SYMBOL_NAME(schedule_tail)
	addl $4, %esp
+#endif
	GET_THREAD_INFO(%ebx)
	jmp syscall_exit
fs/binfmt_elf.c (+1 -1)

@@ -1119,7 +1119,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
 	psinfo.pr_state = i;
 	psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
 	psinfo.pr_zomb = psinfo.pr_sname == 'Z';
-	psinfo.pr_nice = current->__nice;
+	psinfo.pr_nice = task_nice(current);
 	psinfo.pr_flag = current->flags;
 	psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
 	psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
fs/proc/array.c (+2 -6)

@@ -336,12 +336,8 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
 	/* scale priority and nice values from timeslices to -20..20 */
 	/* to make it look like a "normal" Unix priority/nice value */
-	priority = task->prio;
-	if (priority >= MAX_RT_PRIO)
-		priority -= MAX_RT_PRIO;
-	else
-		priority = priority - 100;
-	nice = task->__nice;
+	priority = task_prio(task);
+	nice = task_nice(task);
 	read_lock(&tasklist_lock);
 	ppid = task->pid ? task->p_opptr->pid : 0;
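Note: proc_pid_stat() now calls task_prio() and task_nice() instead of open-coding the conversion. Their implementations live in kernel/sched.c, whose diff is collapsed on this page, so the sketch below only restates the old inline conversion that this hunk deletes; struct task_stub and the values in main() are made-up stand-ins for illustration, not kernel code.

#include <stdio.h>

/* Old value from the include/linux/sched.h block removed later in this
 * commit; used here only to mirror the deleted inline conversion. */
#define MAX_RT_PRIO	128

/* Hypothetical stand-in for the two task_struct fields the old code read. */
struct task_stub {
	int prio;	/* dynamic priority, 0 .. MAX_PRIO-1 */
	long __nice;	/* old nice field, removed by this commit */
};

/* The conversion proc_pid_stat() used to do inline: SCHED_OTHER priorities
 * (>= MAX_RT_PRIO) are shifted down to 0..39, RT priorities come out as
 * negative values, matching the "normal Unix priority" comment above. */
static int old_proc_priority(const struct task_stub *t)
{
	int priority = t->prio;

	if (priority >= MAX_RT_PRIO)
		priority -= MAX_RT_PRIO;
	else
		priority = priority - 100;
	return priority;
}

int main(void)
{
	struct task_stub normal = { .prio = 148, .__nice = 0 };	/* SCHED_OTHER */
	struct task_stub rt     = { .prio = 50,  .__nice = 0 };	/* RT level 50 */

	printf("SCHED_OTHER: /proc priority %d, nice %ld\n",
	       old_proc_priority(&normal), normal.__nice);
	printf("RT:          /proc priority %d\n", old_proc_priority(&rt));
	return 0;
}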
include/asm-i386/bitops.h (+1 -1)

@@ -358,7 +358,7 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
 static __inline__ int find_next_bit(void * addr, int size, int offset)
 {
 	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
 	int set = 0, bit = offset & 31, res;
include/asm-i386/mmu_context.h (+4 -10)

@@ -8,14 +8,10 @@
 /*
  * Every architecture must define this function. It's the fastest
- * way of searching a 168-bit bitmap where the first 128 bits are
- * unlikely to be set. It's guaranteed that at least one of the 168
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
  * bits is cleared.
  */
-#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
-# error update this function.
-#endif
 static inline int sched_find_first_bit(unsigned long *b)
 {
 	if (unlikely(b[0]))

@@ -24,11 +20,9 @@ static inline int sched_find_first_bit(unsigned long *b)
 		return __ffs(b[1]) + 32;
 	if (unlikely(b[2]))
 		return __ffs(b[2]) + 64;
-	if (unlikely(b[3]))
+	if (b[3])
 		return __ffs(b[3]) + 96;
-	if (b[4])
-		return __ffs(b[4]) + 128;
-	return __ffs(b[5]) + 32 + 128;
+	return __ffs(b[4]) + 128;
 }
 /*
  * possibly do the LDT unload here?
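The rewritten sched_find_first_bit() above scans a 140-bit priority bitmap held in five 32-bit words (i386 longs). As a sanity check, the user-space sketch below copies the new body, fills in the "return __ffs(b[0]);" line that falls between the two hunks by following the same pattern, and supplies stand-in definitions of __ffs() and unlikely(); it is a test harness, not kernel code.

#include <stdio.h>
#include <string.h>

/* User-space stand-ins (assumptions, not kernel code): unlikely() is only a
 * branch hint, and __ffs() returns the index of the least significant set
 * bit of a non-zero word. The original targets i386 where long is 32 bits;
 * this test only sets bits below 32 in each word, so it also behaves the
 * same on a 64-bit host. */
#define unlikely(x) (x)

static inline int __ffs(unsigned long word)
{
	return __builtin_ctzl(word);
}

/* The new 140-bit search from the hunks above; the return for b[0] is
 * elided between the hunks and filled in here by pattern. */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

int main(void)
{
	unsigned long bitmap[5];

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[3] |= 1UL << (115 - 96);		/* mark priority level 115 */

	/* Prints 115: the lowest-numbered (highest-priority) non-empty level. */
	printf("first set bit: %d\n", sched_find_first_bit(bitmap));
	return 0;
}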
include/linux/init_task.h (+2 -1)

@@ -45,7 +45,8 @@
     thread_info:	&init_thread_info,		\
     flags:		0,				\
     lock_depth:		-1,				\
-    __nice:		DEF_USER_NICE,			\
+    prio:		120,				\
+    static_prio:	120,				\
     policy:		SCHED_OTHER,			\
     cpus_allowed:	-1,				\
     mm:		NULL,				\
include/linux/sched.h (+8 -63)

@@ -150,7 +150,7 @@ extern void trap_init(void);
 extern void update_process_times(int user);
 extern void update_one_process(struct task_struct *p, unsigned long user,
 			       unsigned long system, int cpu);
-extern void scheduler_tick(struct task_struct *p);
+extern void scheduler_tick(int user_tick, int system);
 extern void sched_task_migrated(struct task_struct *p);
 extern void smp_migrate_task(int cpu, task_t *task);
 extern unsigned long cache_decay_ticks;

@@ -241,18 +241,16 @@ struct task_struct {
 	int lock_depth;		/* Lock depth */

-	int prio;
-	long __nice;
+	int prio, static_prio;
 	list_t run_list;
 	prio_array_t *array;

-	unsigned int time_slice;
-
 	unsigned long sleep_avg;
 	unsigned long sleep_timestamp;

 	unsigned long policy;
 	unsigned long cpus_allowed;
+	unsigned int time_slice;

 	struct task_struct *next_task, *prev_task;

@@ -385,66 +383,12 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
  */
 #define _STK_LIM	(8*1024*1024)

-/*
- * RT priorites go from 0 to 99, but internally we max
- * them out at 128 to make it easier to search the
- * scheduler bitmap.
- */
-#define MAX_RT_PRIO		128
-
-/*
- * The lower the priority of a process, the more likely it is
- * to run. Priority of a process goes from 0 to 167. The 0-99
- * priority range is allocated to RT tasks, the 128-167 range
- * is for SCHED_OTHER tasks.
- */
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
-/*
- * Scales user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ 128 ... 167 (MAX_PRIO-1) ]
- *
- * User-nice value of -20 == static priority 128, and
- * user-nice value 19 == static priority 167. The lower
- * the priority value, the higher the task's priority.
- */
-#define NICE_TO_PRIO(n)		(MAX_RT_PRIO + (n) + 20)
-#define DEF_USER_NICE		0
-
-/*
- * Default timeslice is 150 msecs, maximum is 300 msecs.
- * Minimum timeslice is 10 msecs.
- *
- * These are the 'tuning knobs' of the scheduler:
- */
-#define MIN_TIMESLICE		( 10 * HZ / 1000)
-#define MAX_TIMESLICE		(300 * HZ / 1000)
-#define CHILD_FORK_PENALTY	95
-#define PARENT_FORK_PENALTY	100
-#define EXIT_WEIGHT		3
-#define PRIO_INTERACTIVE_RATIO	20
-#define PRIO_CPU_HOG_RATIO	60
-#define PRIO_BONUS_RATIO	70
-#define INTERACTIVE_DELTA	3
-#define MAX_SLEEP_AVG		(2*HZ)
-#define STARVATION_LIMIT	(2*HZ)
-
-#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
-#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
-
-/*
- * NICE_TO_TIMESLICE scales nice values [ -20 ... 19 ]
- * to time slice values.
- *
- * The higher a process's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority process gets MIN_TIMESLICE worth of execution time.
- */
-#define NICE_TO_TIMESLICE(n) (MIN_TIMESLICE + \
-	((MAX_TIMESLICE - MIN_TIMESLICE) * (19-(n))) / 39)
-
 extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
 extern void set_user_nice(task_t *p, long nice);
+extern int task_prio(task_t *p);
+extern int task_nice(task_t *p);
+extern int idle_cpu(int cpu);
 asmlinkage long sys_sched_yield(void);
 #define yield()	sys_sched_yield()

@@ -526,6 +470,7 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
 						    signed long timeout));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
 extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
+extern void FASTCALL(sched_exit(task_t * p));
 #define wake_up(x)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 #define wake_up_nr(x, nr)	__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
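The large block deleted from sched.h above carries the nice-to-priority and nice-to-timeslice formulas (presumably they move into kernel/sched.c, whose diff is collapsed on this page). The quick check below evaluates those macros exactly as they appear in the removed lines, assuming HZ = 100 (the usual i386 value of this era): nice 0 maps to static priority 148 and roughly a 150 ms timeslice, matching the deleted comments. Under the new 100/140 priority range implied by the mmu_context.h comment, the same formula would give 120, which is the value init_task.h now uses for prio and static_prio.

#include <stdio.h>

/* The macros exactly as they appear in the lines removed above, with
 * HZ = 100 assumed for the timeslice arithmetic. */
#define HZ			100
#define MAX_RT_PRIO		128
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define NICE_TO_PRIO(n)		(MAX_RT_PRIO + (n) + 20)
#define MIN_TIMESLICE		( 10 * HZ / 1000)
#define MAX_TIMESLICE		(300 * HZ / 1000)
#define NICE_TO_TIMESLICE(n)	(MIN_TIMESLICE + \
		((MAX_TIMESLICE - MIN_TIMESLICE) * (19-(n))) / 39)

int main(void)
{
	int n;

	for (n = -20; n <= 19; n += 13)
		printf("nice %3d -> static prio %3d, timeslice %2d ticks (%d ms)\n",
		       n, NICE_TO_PRIO(n), NICE_TO_TIMESLICE(n),
		       NICE_TO_TIMESLICE(n) * 1000 / HZ);
	/* nice 0 gives 128 + 20 = 148 and 15 ticks = ~150 ms, matching the
	 * "Default timeslice is 150 msecs" comment in the removed block. */
	return 0;
}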
kernel/exit.c (+3 -24)

@@ -31,8 +31,6 @@ int getrusage(struct task_struct *, int, struct rusage *);
 static void release_task(struct task_struct * p)
 {
-	unsigned long flags;
-
 	if (p == current)
 		BUG();
 #ifdef CONFIG_SMP

@@ -46,25 +44,7 @@ static void release_task(struct task_struct * p)
 	current->cmin_flt += p->min_flt + p->cmin_flt;
 	current->cmaj_flt += p->maj_flt + p->cmaj_flt;
 	current->cnswap += p->nswap + p->cnswap;
-	/*
-	 * Potentially available timeslices are retrieved
-	 * here - this way the parent does not get penalized
-	 * for creating too many processes.
-	 *
-	 * (this cannot be used to artificially 'generate'
-	 * timeslices, because any timeslice recovered here
-	 * was given away by the parent in the first place.)
-	 */
-	__save_flags(flags);
-	__cli();
-	current->time_slice += p->time_slice;
-	if (current->time_slice > MAX_TIMESLICE)
-		current->time_slice = MAX_TIMESLICE;
-	if (p->sleep_avg < current->sleep_avg)
-		current->sleep_avg = (current->sleep_avg * EXIT_WEIGHT +
-			p->sleep_avg) / (EXIT_WEIGHT + 1);
-	__restore_flags(flags);
+	sched_exit(p);
 	p->pid = 0;
 	put_task_struct(p);
 }

@@ -172,9 +152,8 @@ void reparent_to_init(void)
 	current->exit_signal = SIGCHLD;
 	current->ptrace = 0;
 	if ((current->policy == SCHED_OTHER) &&
-			(current->__nice < DEF_USER_NICE))
-		set_user_nice(current, DEF_USER_NICE);
+			(task_nice(current) < 0))
+		set_user_nice(current, 0);
 	/* cpus_allowed? */
 	/* rt_priority? */
 	/* signals? */
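The block removed from release_task() is replaced by a single sched_exit(p) call; the new implementation sits in the collapsed kernel/sched.c diff, so the sketch below merely restates the deleted reclaim arithmetic as a plain user-space function to show what the parent recovers. struct task_stub and the values in main() are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define HZ		100
#define MAX_TIMESLICE	(300 * HZ / 1000)	/* from the removed sched.h block */
#define EXIT_WEIGHT	3

/* Hypothetical stand-in holding only the fields the removed code touched. */
struct task_stub {
	unsigned int time_slice;
	unsigned long sleep_avg;
};

/* The reclaim arithmetic as it appeared in the removed exit.c lines: the
 * parent recovers the exiting child's leftover timeslice (capped at
 * MAX_TIMESLICE) and has its sleep average pulled toward the child's. */
static void reclaim_child_timeslice(struct task_stub *parent,
				    const struct task_stub *child)
{
	parent->time_slice += child->time_slice;
	if (parent->time_slice > MAX_TIMESLICE)
		parent->time_slice = MAX_TIMESLICE;
	if (child->sleep_avg < parent->sleep_avg)
		parent->sleep_avg = (parent->sleep_avg * EXIT_WEIGHT +
			child->sleep_avg) / (EXIT_WEIGHT + 1);
}

int main(void)
{
	struct task_stub parent = { .time_slice = 12, .sleep_avg = 80 };
	struct task_stub child  = { .time_slice = 25, .sleep_avg = 20 };

	reclaim_child_timeslice(&parent, &child);
	/* Prints time_slice=30 (capped) and sleep_avg=65. */
	printf("parent: time_slice=%u sleep_avg=%lu\n",
	       parent.time_slice, parent.sleep_avg);
	return 0;
}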
kernel/fork.c (+1 -4)

@@ -749,14 +749,11 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
 		 * runqueue lock is not a problem.
 		 */
 		current->time_slice = 1;
-		scheduler_tick(current);
+		scheduler_tick(0, 0);
 	}
 	p->sleep_timestamp = jiffies;
 	__restore_flags(flags);

-	if (p->policy == SCHED_OTHER)
-		p->prio = MAX_PRIO - 1 - ((MAX_PRIO - 1 - p->prio) * 1) / 3;
-
 	/*
 	 * Ok, add it to the run-queues and make it
 	 * visible to the rest of the system.
kernel/ksyms.c (+2 -0)

@@ -455,6 +455,8 @@ EXPORT_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(schedule_timeout);
 EXPORT_SYMBOL(sys_sched_yield);
 EXPORT_SYMBOL(set_user_nice);
+EXPORT_SYMBOL(task_nice);
+EXPORT_SYMBOL_GPL(idle_cpu);
 EXPORT_SYMBOL(jiffies);
 EXPORT_SYMBOL(xtime);
 EXPORT_SYMBOL(do_gettimeofday);
kernel/sched.c (+218 -85)

(diff collapsed on the original page; not shown)
kernel/sys.c (+2 -2)

@@ -221,7 +221,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
 		}
 		if (error == -ESRCH)
 			error = 0;
-		if (niceval < p->__nice && !capable(CAP_SYS_NICE))
+		if (niceval < task_nice(p) && !capable(CAP_SYS_NICE))
 			error = -EACCES;
 		else
 			set_user_nice(p, niceval);

@@ -250,7 +250,7 @@ asmlinkage long sys_getpriority(int which, int who)
 		long niceval;
 		if (!proc_sel(p, which, who))
 			continue;
-		niceval = 20 - p->__nice;
+		niceval = 20 - task_nice(p);
 		if (niceval > retval)
 			retval = niceval;
 	}
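Both hunks keep sys_getpriority()'s long-standing convention of returning 20 - nice (a value in 1..40) rather than the raw -20..19 nice value, now computed through task_nice(). The small program below shows the two views on a current Linux/glibc host (an assumption about the build environment, not part of this commit): the raw syscall reports the kernel encoding, while glibc's getpriority() wrapper converts it back.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

int main(void)
{
	/* Raw syscall: the kernel returns 20 - nice, as in the hunk above. */
	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);

	/* glibc's wrapper converts back to the familiar -20..19 nice range. */
	int nice_val = getpriority(PRIO_PROCESS, 0);

	printf("raw syscall value: %ld, nice value: %d\n", raw, nice_val);
	return 0;
}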
kernel/timer.c (+1 -11)

@@ -584,17 +584,7 @@ void update_process_times(int user_tick)
 	int cpu = smp_processor_id(), system = user_tick ^ 1;

 	update_one_process(p, user_tick, system, cpu);
-	if (p->pid) {
-		if (p->__nice > 0)
-			kstat.per_cpu_nice[cpu] += user_tick;
-		else
-			kstat.per_cpu_user[cpu] += user_tick;
-		kstat.per_cpu_system[cpu] += system;
-	} else {
-		if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
-			kstat.per_cpu_system[cpu] += system;
-	}
-	scheduler_tick(p);
+	scheduler_tick(user_tick, system);
 }

 /*
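update_process_times() used to charge the tick to the per-CPU user/nice/system counters itself and then call scheduler_tick(p); after this change it only calls scheduler_tick(user_tick, system), so that accounting presumably moves into the scheduler (again hidden in the collapsed kernel/sched.c diff). The sketch below restates the deleted accounting with a simplified stand-in for kernel_stat, dropping the idle-task interrupt-context detail of the original.

#include <stdio.h>

/* Simplified stand-in for the few kernel_stat fields the removed code used. */
struct kstat_stub {
	unsigned int per_cpu_user, per_cpu_nice, per_cpu_system;
};

/* The per-tick accounting removed from update_process_times(), restated for
 * one CPU: a user tick is charged to "nice" time if the task is niced,
 * otherwise to "user" time, and a system tick to "system" time. (The old
 * code skipped user/nice accounting for the idle task, pid 0.) */
static void account_tick(struct kstat_stub *ks, int pid, long nice,
			 int user_tick, int system)
{
	if (pid) {
		if (nice > 0)
			ks->per_cpu_nice += user_tick;
		else
			ks->per_cpu_user += user_tick;
		ks->per_cpu_system += system;
	}
}

int main(void)
{
	struct kstat_stub ks = { 0, 0, 0 };

	account_tick(&ks, 1234, 0, 1, 0);	/* user tick, nice 0 */
	account_tick(&ks, 1234, 10, 1, 0);	/* user tick, niced task */
	account_tick(&ks, 1234, 0, 0, 1);	/* system tick */
	printf("user=%u nice=%u system=%u\n",
	       ks.per_cpu_user, ks.per_cpu_nice, ks.per_cpu_system);
	return 0;
}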
mm/oom_kill.c (+2 -2)

@@ -82,7 +82,7 @@ static int badness(struct task_struct *p)
 	 * Niced processes are most likely less important, so double
 	 * their badness points.
 	 */
-	if (p->__nice > 0)
+	if (task_nice(p) > 0)
 		points *= 2;

 	/*

@@ -146,7 +146,7 @@ void oom_kill_task(struct task_struct *p)
 	 * all the memory it needs. That way it should be able to
 	 * exit() and clear out its resources quickly...
 	 */
-	p->time_slice = 2 * MAX_TIMESLICE;
+	p->time_slice = HZ;
 	p->flags |= PF_MEMALLOC | PF_MEMDIE;

 	/* This process has hardware access, be more careful. */