Commit b64c5fda authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core timer changes from Ingo Molnar:
 "It contains continued generic-NOHZ work by Frederic and smaller
  cleanups."

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  time: Kill xtime_lock, replacing it with jiffies_lock
  clocksource: arm_generic: use this_cpu_ptr per-cpu helper
  clocksource: arm_generic: use integer math helpers
  time/jiffies: Make clocksource_jiffies static
  clocksource: clean up parse_pmtmr()
  tick: Correct the comments for tick_sched_timer()
  tick: Conditionally build nohz specific code in tick handler
  tick: Consolidate tick handling for high and low res handlers
  tick: Consolidate timekeeping handling code
parents f57d54ba 9c3f9e28
drivers/clocksource/acpi_pm.c
@@ -233,16 +233,15 @@ fs_initcall(init_acpi_pm_clocksource);
  */
 static int __init parse_pmtmr(char *arg)
 {
-	unsigned long base;
+	unsigned int base;
+	int ret;
 
-	if (strict_strtoul(arg, 16, &base))
-		return -EINVAL;
-#ifdef CONFIG_X86_64
-	if (base > UINT_MAX)
-		return -ERANGE;
-#endif
-	printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
-			pmtmr_ioport, base);
+	ret = kstrtouint(arg, 16, &base);
+	if (ret)
+		return ret;
+
+	pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
+		base);
 	pmtmr_ioport = base;
 
 	return 1;
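Note on the parse_pmtmr() cleanup above: kstrtouint() parses straight into an unsigned int and itself returns -ERANGE when the value does not fit, which is why the old CONFIG_X86_64-only UINT_MAX check could go away together with strict_strtoul(). A minimal sketch of the same pattern (the helper name is illustrative, not from the patch):

	#include <linux/kernel.h>	/* kstrtouint() */

	/* Parse a hex I/O-port string; returns 0 or a -errno from kstrtouint(). */
	static int parse_hex_ioport(const char *arg, unsigned int *port)
	{
		unsigned int val;
		int ret;

		ret = kstrtouint(arg, 16, &val);	/* -EINVAL or -ERANGE on failure */
		if (ret)
			return ret;

		*port = val;
		return 0;
	}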
drivers/clocksource/arm_generic.c
@@ -127,7 +127,7 @@ static void __init arch_timer_calibrate(void)
 
 	/* Cache the sched_clock multiplier to save a divide in the hot path. */
-	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
+	sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);
 
 	pr_info("Architected local timer running at %u.%02uMHz.\n",
 		arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
@@ -221,10 +221,10 @@ int __init arm_generic_timer_init(void)
 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
 
 	/* Calibrate the delay loop directly */
-	lpj_fine = arch_timer_rate / HZ;
+	lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
 
 	/* Immediately configure the timer on the boot CPU */
-	arch_timer_setup(per_cpu_ptr(&arch_timer_evt, smp_processor_id()));
+	arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
 
 	register_cpu_notifier(&arch_timer_cpu_nb);
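Two helpers do the work here: this_cpu_ptr(p) is the idiomatic shorthand for per_cpu_ptr(p, smp_processor_id()) and lets the architecture resolve the address through its per-cpu base register, while DIV_ROUND_CLOSEST() rounds to the nearest integer instead of truncating. A small sketch (the demo names are illustrative, not from the driver; assumes preemption is disabled, as in the boot-CPU init path above):

	#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */
	#include <linux/percpu.h>	/* DEFINE_PER_CPU(), per_cpu_ptr(), this_cpu_ptr() */
	#include <linux/smp.h>		/* smp_processor_id() */

	static DEFINE_PER_CPU(u64, demo_slot);

	static void demo(void)
	{
		/* Equivalent ways to reach this CPU's slot: */
		u64 *a = per_cpu_ptr(&demo_slot, smp_processor_id());
		u64 *b = this_cpu_ptr(&demo_slot);	/* preferred shorthand */

		WARN_ON(a != b);

		/* Rounding to nearest vs. truncation, e.g. a 32768 Hz rate:
		 * 1000000000 / 32768                   == 30517 (truncated)
		 * DIV_ROUND_CLOSEST(1000000000, 32768) == 30518 (nearest)
		 */
	}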
drivers/clocksource/i8253.c
@@ -35,7 +35,7 @@ static cycle_t i8253_read(struct clocksource *cs)
 	raw_spin_lock_irqsave(&i8253_lock, flags);
 	/*
-	 * Although our caller may have the read side of xtime_lock,
+	 * Although our caller may have the read side of jiffies_lock,
 	 * this is now a seqlock, and we are cheating in this routine
 	 * by having side effects on state that we cannot undo if
 	 * there is a collision on the seqlock and our caller has to
include/linux/jiffies.h
@@ -70,11 +70,12 @@ extern int register_refined_jiffies(long clock_tick_rate);
 
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
+ * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __jiffy_data jiffies_64;
 extern unsigned long volatile __jiffy_data jiffies;
+extern seqlock_t jiffies_lock;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
kernel/time/jiffies.c
@@ -58,7 +58,7 @@ static cycle_t jiffies_read(struct clocksource *cs)
 	return (cycle_t) jiffies;
 }
 
-struct clocksource clocksource_jiffies = {
+static struct clocksource clocksource_jiffies = {
 	.name		= "jiffies",
 	.rating		= 1, /* lowest valid rating*/
 	.read		= jiffies_read,
@@ -67,6 +67,8 @@ struct clocksource clocksource_jiffies = {
 	.shift		= JIFFIES_SHIFT,
 };
 
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
@@ -74,9 +76,9 @@ u64 get_jiffies_64(void)
 	u64 ret;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		ret = jiffies_64;
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 	return ret;
 }
 EXPORT_SYMBOL(get_jiffies_64);
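This hunk is the heart of the series: the seqlock now lives next to the jiffies data it protects, and every former xtime_lock user converted below follows the same protocol. Sketched here for reference (the demo functions are illustrative only; real writers go through do_timer(), as the timekeeping.c hunk at the end enforces):

	#include <linux/seqlock.h>	/* write_seqlock(), read_seqbegin(), ... */
	#include <linux/jiffies.h>	/* jiffies_64, jiffies_lock (after this series) */

	/* Writer: publish a consistent update under the seqlock. */
	static void demo_writer(unsigned long ticks)
	{
		write_seqlock(&jiffies_lock);
		jiffies_64 += ticks;	/* illustrative; real code calls do_timer() */
		write_sequnlock(&jiffies_lock);
	}

	/* Reader: retry the sample if a writer raced with us. */
	static u64 demo_reader(void)
	{
		unsigned seq;
		u64 snapshot;

		do {
			seq = read_seqbegin(&jiffies_lock);
			snapshot = jiffies_64;
		} while (read_seqretry(&jiffies_lock, seq));

		return snapshot;
	}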
kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
 	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&xtime_lock);
+		write_seqlock(&jiffies_lock);
 		/* Keep track of the next tick event */
 		tick_next_period = ktime_add(tick_next_period, tick_period);
 
 		do_timer(1);
-		write_sequnlock(&xtime_lock);
+		write_sequnlock(&jiffies_lock);
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 		ktime_t next;
 
 		do {
-			seq = read_seqbegin(&xtime_lock);
+			seq = read_seqbegin(&jiffies_lock);
 			next = tick_next_period;
-		} while (read_seqretry(&xtime_lock, seq));
+		} while (read_seqretry(&jiffies_lock, seq));
 
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
kernel/time/tick-internal.h
@@ -141,4 +141,3 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
  */
 static ktime_t last_jiffies_update;
 
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding xtime_lock:
+	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 < tick_period.tv64)
 		return;
 
-	/* Reevalute with xtime_lock held */
-	write_seqlock(&xtime_lock);
+	/* Reevalute with jiffies_lock held */
+	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }
 
 /*
@@ -89,15 +89,58 @@ static ktime_t tick_init_jiffy_update(void)
 {
 	ktime_t period;
 
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update.tv64 == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 	return period;
 }
 
+static void tick_sched_do_timer(ktime_t now)
+{
+	int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themself to
+	 * this duty, then the jiffies update is still serialized by
+	 * jiffies_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+		tick_do_timer_cpu = cpu;
+#endif
+
+	/* Check, if the jiffies need an update */
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+	/*
+	 * When we are idle and the tick is stopped, we have to touch
+	 * the watchdog as we might not schedule for a really long
+	 * time. This happens on complete idle SMP systems while
+	 * waiting on the login prompt. We also increment the "start of
+	 * idle" jiffy stamp so the idle accounting adjustment we do
+	 * when we go busy again does not account too much ticks.
+	 */
+	if (ts->tick_stopped) {
+		touch_softlockup_watchdog();
+		if (is_idle_task(current))
+			ts->idle_jiffies++;
+	}
+#endif
+	update_process_times(user_mode(regs));
+	profile_tick(CPU_PROFILING);
+}
+
 /*
  * NOHZ - aka dynamic tick functionality
  */
@@ -282,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
 		time_delta = timekeeping_max_deferment();
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
@@ -652,40 +695,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
-	int cpu = smp_processor_id();
 	ktime_t now = ktime_get();
 
 	dev->next_event.tv64 = KTIME_MAX;
 
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
-
-	/*
-	 * When we are idle and the tick is stopped, we have to touch
-	 * the watchdog as we might not schedule for a really long
-	 * time. This happens on complete idle SMP systems while
-	 * waiting on the login prompt. We also increment the "start
-	 * of idle" jiffy stamp so the idle accounting adjustment we
-	 * do when we go busy again does not account too much ticks.
-	 */
-	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
-		ts->idle_jiffies++;
-	}
-
-	update_process_times(user_mode(regs));
-	profile_tick(CPU_PROFILING);
+	tick_sched_do_timer(now);
+	tick_sched_handle(ts, regs);
 
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
@@ -806,45 +821,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		container_of(timer, struct tick_sched, sched_timer);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
-	int cpu = smp_processor_id();
 
-#ifdef CONFIG_NO_HZ
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-#endif
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
+	tick_sched_do_timer(now);
 
 	/*
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
-		/*
-		 * When we are idle and the tick is stopped, we have to touch
-		 * the watchdog as we might not schedule for a really long
-		 * time. This happens on complete idle SMP systems while
-		 * waiting on the login prompt. We also increment the "start of
-		 * idle" jiffy stamp so the idle accounting adjustment we do
-		 * when we go busy again does not account too much ticks.
-		 */
-		if (ts->tick_stopped) {
-			touch_softlockup_watchdog();
-			if (is_idle_task(current))
-				ts->idle_jiffies++;
-		}
-		update_process_times(user_mode(regs));
-		profile_tick(CPU_PROFILING);
-	}
+	if (regs)
+		tick_sched_handle(ts, regs);
 
 	hrtimer_forward(timer, now, tick_period);
@@ -878,7 +863,7 @@ void tick_setup_sched_timer(void)
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
-	/* Offset the tick to avert xtime_lock contention. */
+	/* Offset the tick to avert jiffies_lock contention. */
 	if (sched_skew_tick) {
 		u64 offset = ktime_to_ns(tick_period) >> 1;
 		do_div(offset, num_possible_cpus());
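After this consolidation both the low-resolution NOHZ handler and the high-resolution tick_sched_timer() reduce to the same two helpers. As a side effect, visible in the hunks above, the low-res path now only bumps idle_jiffies when the idle task is actually running, via the is_idle_task(current) check it previously lacked. The shared shape, as a sketch (not a function in the patch):

	/* Common skeleton of both tick handlers after the refactor. */
	static void tick_handler_shape(struct tick_sched *ts, struct pt_regs *regs,
				       ktime_t now)
	{
		tick_sched_do_timer(now);	/* jiffies/do_timer duty, if this CPU owns it */
		if (regs)			/* tick_sched_timer() guards on valid irq regs */
			tick_sched_handle(ts, regs);
	}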
kernel/time/timekeeping.c
@@ -25,12 +25,6 @@
 
 static struct timekeeper timekeeper;
 
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
@@ -1299,9 +1293,7 @@ struct timespec get_monotonic_coarse(void)
 }
 
 /*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
+ * Must hold jiffies_lock
  */
 void do_timer(unsigned long ticks)
 {
@@ -1389,7 +1381,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
  */
 void xtime_update(unsigned long ticks)
 {
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }