Commit 37c00b84 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  latencytop: change /proc task_struct access method
  latencytop: fix memory leak on latency proc file
  latencytop: fix kernel panic while reading latency proc file
  sched: add declaration of sched_tail to sched.h
  sched: fix signedness warnings in sched.c
  sched: clean up __pick_last_entity() a bit
  sched: remove duplicate code from sched_fair.c
  sched: make early bootup sched_clock() use safer
parents cf3680b9 13d77c37
@@ -128,8 +128,6 @@ void *get_current(void)
 	return current;
 }
 
-extern void schedule_tail(struct task_struct *prev);
-
 /*
  * This is called magically, by its address being stuffed in a jmp_buf
  * and being longjmp-d to.
...
@@ -314,9 +314,12 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 static int lstats_show_proc(struct seq_file *m, void *v)
 {
 	int i;
-	struct task_struct *task = m->private;
+	struct inode *inode = m->private;
+	struct task_struct *task = get_proc_task(inode);
 
-	seq_puts(m, "Latency Top version : v0.1\n");
+	if (!task)
+		return -ESRCH;
+	seq_puts(m, "Latency Top version : v0.1\n");
 	for (i = 0; i < 32; i++) {
 		if (task->latency_record[i].backtrace[0]) {
 			int q;
@@ -341,32 +344,24 @@ static int lstats_show_proc(struct seq_file *m, void *v)
 		}
 	}
+	put_task_struct(task);
 	return 0;
 }
 
 static int lstats_open(struct inode *inode, struct file *file)
 {
-	int ret;
-	struct seq_file *m;
-	struct task_struct *task = get_proc_task(inode);
-
-	ret = single_open(file, lstats_show_proc, NULL);
-	if (!ret) {
-		m = file->private_data;
-		m->private = task;
-	}
-	return ret;
+	return single_open(file, lstats_show_proc, inode);
 }
 
 static ssize_t lstats_write(struct file *file, const char __user *buf,
 			    size_t count, loff_t *offs)
 {
-	struct seq_file *m;
-	struct task_struct *task;
+	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 
-	m = file->private_data;
-	task = m->private;
+	if (!task)
+		return -ESRCH;
 	clear_all_latency_tracing(task);
+	put_task_struct(task);
 
 	return count;
 }
...
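The fix above follows the usual /proc seq_file lifetime rule: stash only the inode in m->private via single_open(), then resolve the task_struct per operation, taking and dropping a reference inside each call. The old code pinned the task at open() time and cached the pointer, which leaked the reference and could follow a stale pointer once the task exited (the leak and panic fixed by this series). A minimal illustrative sketch of the pattern, not part of the commit, with hypothetical names:

/* Resolve and drop the task reference inside each operation,
 * never caching it across open()/read().
 */
static int example_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;		 /* stored by single_open() */
	struct task_struct *task = get_proc_task(inode); /* takes a reference */

	if (!task)
		return -ESRCH;		/* task exited since open() */

	/* ... emit data derived from *task ... */

	put_task_struct(task);		/* balance get_proc_task() */
	return 0;
}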
@@ -242,6 +242,7 @@ struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern void init_idle_bootup_task(struct task_struct *idle);
...
@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
@@ -3885,7 +3889,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
-	long *switch_count;
+	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
@@ -7284,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
...
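The cpu_clock() change replaces an indirect probe (checking rq->idle, which is only assigned partway through boot) with an explicit scheduler_running flag that sched_init() sets as its final statement, so any caller that arrives earlier gets a harmless 0 instead of touching a half-initialized runqueue. A hedged sketch of the same guard idiom, with illustrative names only:

/* Early-boot guard idiom (illustrative names, not the commit's code).
 * __read_mostly keeps the flag away from write-hot data; unlikely()
 * makes the post-boot fast path a single predicted-not-taken branch.
 */
static __read_mostly int example_ready;

unsigned long long example_clock(void)
{
	if (unlikely(!example_ready))
		return 0;	/* callers must tolerate 0 before init */

	/* ... safe to touch state that example_init() set up ... */
	return 42;
}

void __init example_init(void)
{
	/* ... initialize state ... */
	example_ready = 1;	/* publish readiness last */
}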
@@ -202,17 +202,12 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 
 static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
-	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-	struct sched_entity *se = NULL;
-	struct rb_node *parent;
+	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
-	while (*link) {
-		parent = *link;
-		se = rb_entry(parent, struct sched_entity, run_node);
-		link = &parent->rb_right;
-	}
+	if (!last)
+		return NULL;
 
-	return se;
+	return rb_entry(last, struct sched_entity, run_node);
 }
 
 /**************************************************************
...
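The cleanup leans on two rbtree facts: the rightmost node of a red-black tree is its maximum, and rb_last() performs exactly the rb_right walk the removed loop open-coded, returning NULL for an empty tree just as the old se = NULL initialization did. rb_entry() is container_of(), mapping the embedded rb_node back to its enclosing structure. A hedged sketch of the same pattern on a generic struct (names illustrative):

#include <linux/rbtree.h>

/* Illustrative only: any struct embedding an rb_node works the same way. */
struct example_item {
	int		key;
	struct rb_node	node;	/* embedded tree linkage */
};

static struct example_item *example_last(struct rb_root *root)
{
	struct rb_node *last = rb_last(root);	/* rightmost == greatest key */

	/* rb_entry() is container_of(): recover the enclosing struct. */
	return last ? rb_entry(last, struct example_item, node) : NULL;
}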