Commit f72a209a authored by Linus Torvalds


Merge branches 'irq-urgent-for-linus', 'x86-urgent-for-linus' and 'sched-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip

* 'irq-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  irq: Fix check for already initialized irq_domain in irq_domain_add
  irq: Add declaration of irq_domain_simple_ops to irqdomain.h

* 'x86-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  x86/rtc: Don't recursively acquire rtc_lock

* 'sched-urgent-for-linus' of git://tesla.tglx.de/git/linux-2.6-tip:
  posix-cpu-timers: Cure SMP wobbles
  sched: Fix up wchan borkage
  sched/rt: Migrate equal priority tasks to available CPUs
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
 {
 	int real_seconds, real_minutes, cmos_minutes;
 	unsigned char save_control, save_freq_select;
+	unsigned long flags;
 	int retval = 0;
 
+	spin_lock_irqsave(&rtc_lock, flags);
+
 	/* tell the clock it's being set */
 	save_control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
 	CMOS_WRITE(save_control, RTC_CONTROL);
 	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	return retval;
 }
 
 unsigned long mach_get_cmos_time(void)
 {
 	unsigned int status, year, mon, day, hour, min, sec, century = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
 
 	/*
 	 * If UIP is clear, then we have >= 244 microseconds before
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
 	status = CMOS_READ(RTC_CONTROL);
 	WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
 		sec = bcd2bin(sec);
 		min = bcd2bin(min);
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
 
 int update_persistent_clock(struct timespec now)
 {
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&rtc_lock, flags);
-	retval = x86_platform.set_wallclock(now.tv_sec);
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	return retval;
+	return x86_platform.set_wallclock(now.tv_sec);
 }
 
 /* not static: needed by APM */
 void read_persistent_clock(struct timespec *ts)
 {
-	unsigned long retval, flags;
+	unsigned long retval;
 
-	spin_lock_irqsave(&rtc_lock, flags);
 	retval = x86_platform.get_wallclock();
-	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	ts->tv_sec = retval;
 	ts->tv_nsec = 0;
...
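This is the "x86/rtc: Don't recursively acquire rtc_lock" change: rtc_lock is now taken inside mach_set_rtc_mmss()/mach_get_cmos_time() (and the vRTC equivalents below) instead of in the generic update_persistent_clock()/read_persistent_clock() wrappers, because some wallclock callbacks already take rtc_lock themselves and kernel spinlocks are not recursive, so a second acquisition on the same CPU deadlocks. Below is a minimal userspace sketch of that hazard, not kernel code: a PTHREAD_MUTEX_ERRORCHECK mutex stands in for the non-recursive rtc_lock (a real spinlock would simply hang), and the wrapper/callback names are made up for illustration. Build with cc -pthread.

/* Userspace sketch of the double-acquisition hazard the patch removes. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t rtc_lock;

static int platform_set_wallclock(long secs)
{
	/* The platform callback takes the lock itself... */
	int err = pthread_mutex_lock(&rtc_lock);
	if (err) {
		printf("platform op: lock failed: %s\n", strerror(err));
		return -err;
	}
	printf("platform op: wrote %ld\n", secs);
	pthread_mutex_unlock(&rtc_lock);
	return 0;
}

static int update_persistent_clock_old(long secs)
{
	/* ...but the old wrapper had already taken it, so the nested
	 * lock fails (EDEADLK here; a deadlock with a real spinlock). */
	pthread_mutex_lock(&rtc_lock);
	int ret = platform_set_wallclock(secs);
	pthread_mutex_unlock(&rtc_lock);
	return ret;
}

int main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&rtc_lock, &attr);

	update_persistent_clock_old(1234567890); /* old layout: reports EDEADLK */
	platform_set_wallclock(1234567890);      /* fixed layout: lock taken once */
	return 0;
}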
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
 unsigned long vrtc_get_time(void)
 {
 	u8 sec, min, hour, mday, mon;
+	unsigned long flags;
 	u32 year;
 
+	spin_lock_irqsave(&rtc_lock, flags);
+
 	while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
 		cpu_relax();
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
 	mon = vrtc_cmos_read(RTC_MONTH);
 	year = vrtc_cmos_read(RTC_YEAR);
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	/* vRTC YEAR reg contains the offset to 1960 */
 	year += 1960;
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
 int vrtc_set_mmss(unsigned long nowtime)
 {
 	int real_sec, real_min;
+	unsigned long flags;
 	int vrtc_min;
 
+	spin_lock_irqsave(&rtc_lock, flags);
 	vrtc_min = vrtc_cmos_read(RTC_MINUTES);
 
 	real_sec = nowtime % 60;
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
 	vrtc_cmos_write(real_sec, RTC_SECONDS);
 	vrtc_cmos_write(real_min, RTC_MINUTES);
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	return 0;
 }
...
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain);
 #endif /* CONFIG_IRQ_DOMAIN */
 
 #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
+extern struct irq_domain_ops irq_domain_simple_ops;
 extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
 extern void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start);
...
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {}
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
-extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
...
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
 	 */
 	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
 		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-		if (d || d->domain) {
+		if (!d) {
+			WARN(1, "error: assigning domain to non existant irq_desc");
+			return;
+		}
+		if (d->domain) {
 			/* things are broken; just report, don't clean up */
 			WARN(1, "error: irq_desc already assigned to a domain");
 			return;
...
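The old test `if (d || d->domain)` was wrong on both sides: it rejected every valid irq_desc (any non-NULL d) and, when d really was NULL, the second operand dereferenced a NULL pointer. The fix splits it into two checks with distinct warnings. The standalone C sketch below mirrors that corrected logic with hypothetical struct and function names; it is an illustration, not the kernel's code.

/* Sketch of the two-step check that replaces `if (d || d->domain)`. */
#include <stddef.h>
#include <stdio.h>

struct desc { void *domain; };

static int can_assign(struct desc *d)
{
	/* Broken form `if (d || d->domain)` rejects every valid
	 * descriptor and dereferences NULL when d is missing. */
	if (!d) {
		fprintf(stderr, "error: no such descriptor\n");
		return 0;
	}
	if (d->domain) {
		fprintf(stderr, "error: descriptor already claimed\n");
		return 0;
	}
	return 1;
}

int main(void)
{
	int dummy_domain;
	struct desc free_desc = { .domain = NULL };
	struct desc busy_desc = { .domain = &dummy_domain };

	/* Expected: free=1 busy=0 missing=0, with no crash on NULL. */
	printf("free=%d busy=%d missing=%d\n",
	       can_assign(&free_desc), can_assign(&busy_desc), can_assign(NULL));
	return 0;
}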
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	do {
 		times->utime = cputime_add(times->utime, t->utime);
 		times->stime = cputime_add(times->stime, t->stime);
-		times->sum_exec_runtime += t->se.sum_exec_runtime;
+		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = thread_group_sched_runtime(p);
+		thread_group_cputime(p, &cputime);
+		cpu->sched = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
...
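This is the "posix-cpu-timers: Cure SMP wobbles" change: thread_group_cputime() now sums task_sched_runtime() per thread, which includes runtime a thread has accrued but not yet flushed into sum_exec_runtime, and cpu_clock_sample_group() samples through thread_group_cputime() instead of the removed thread_group_sched_runtime(). The user-visible symptom was the process CPU-time clock jittering on SMP. Below is a small userspace check of that guarantee, offered only as an illustration; the thread count and sample count are arbitrary, and the program tests behaviour rather than reproducing kernel code. Build with cc -O2 -pthread.

/* Sample CLOCK_PROCESS_CPUTIME_ID under load and count backward steps. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static volatile int stop;

static void *burn(void *arg)
{
	(void)arg;
	while (!stop)
		;	/* spin to accumulate CPU time on several CPUs */
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	struct timespec prev = {0, 0}, now;
	long backwards = 0;

	for (int i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, burn, NULL);

	for (int i = 0; i < 1000000; i++) {
		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
		if (now.tv_sec < prev.tv_sec ||
		    (now.tv_sec == prev.tv_sec && now.tv_nsec < prev.tv_nsec))
			backwards++;	/* a cured kernel should never hit this */
		prev = now;
	}

	stop = 1;
	for (int i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);

	printf("backwards steps observed: %ld\n", backwards);
	return 0;
}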
@@ -3724,30 +3724,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
-/*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
 /*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
...
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->rt.nr_cpus_allowed < 2 ||
-	     curr->prio < p->prio) &&
+	     curr->prio <= p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
 	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio))
+	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
...
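"sched/rt: Migrate equal priority tasks to available CPUs" relaxes the comparison from `<` to `<=`: a waking RT task is now pushed toward another runqueue not only when the currently running RT task has strictly higher priority (lower prio value), but also when the priorities are equal, so an equal-priority task no longer waits behind its peer while other CPUs sit free. The standalone sketch below restates just that decision with hypothetical names and the kernel's lower-value-is-higher-priority convention; it is not the scheduler code itself.

/* Sketch of the push decision before and after the `<` -> `<=` change. */
#include <stdbool.h>
#include <stdio.h>

struct rt_task {
	int prio;		/* lower value = higher priority */
	int nr_cpus_allowed;	/* how many CPUs this task may run on */
};

static bool should_push(const struct rt_task *curr, const struct rt_task *p,
			bool push_on_equal)
{
	/* Old rule: only when curr is strictly higher priority or pinned.
	 * New rule: also when the two priorities are equal. */
	int curr_wins = push_on_equal ? (curr->prio <= p->prio)
				      : (curr->prio < p->prio);
	return (curr->nr_cpus_allowed < 2 || curr_wins) &&
	       p->nr_cpus_allowed > 1;
}

int main(void)
{
	struct rt_task curr   = { .prio = 50, .nr_cpus_allowed = 4 };
	struct rt_task waking = { .prio = 50, .nr_cpus_allowed = 4 };

	printf("equal priorities, old rule: push=%d\n",
	       should_push(&curr, &waking, false));	/* 0: waking task waits */
	printf("equal priorities, new rule: push=%d\n",
	       should_push(&curr, &waking, true));	/* 1: try another CPU */
	return 0;
}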