Commit 65a99597 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull NOHZ updates from Ingo Molnar:
 "The main changes, mostly written by Frederic Weisbecker, include:

   - Fix some jiffies based cputime assumptions.  (No real harm because
     the concerned code isn't used by full dynticks.)

   - Simplify jiffies <-> usecs conversions.  Remove dead code.

   - Remove early hacks on nohz full code that avoided messing up idle
     nohz internals.  Now nohz full and idle are well integrated, and
     such hacks have become needless.

   - Restart nohz full tick from irq exit.  (A simplification and a
     preparation for future optimization on scheduler kick to nohz
     full)

   - Code cleanups.

   - Tile driver isolation enhancement on top of nohz.  (Chris Metcalf)"

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  nohz: Remove useless argument on tick_nohz_task_switch()
  nohz: Move tick_nohz_restart_sched_tick() above its users
  nohz: Restart nohz full tick from irq exit
  nohz: Remove idle task special case
  nohz: Prevent tilegx network driver interrupts
  alpha: Fix jiffies based cputime assumption
  apm32: Fix cputime == jiffies assumption
  jiffies: Remove HZ > USEC_PER_SEC special case
parents 418c2e1f 555ee95a
...@@ -1138,6 +1138,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) ...@@ -1138,6 +1138,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
{ {
struct rusage32 r; struct rusage32 r;
cputime_t utime, stime; cputime_t utime, stime;
unsigned long utime_jiffies, stime_jiffies;
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
return -EINVAL; return -EINVAL;
...@@ -1146,14 +1147,18 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) ...@@ -1146,14 +1147,18 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
switch (who) { switch (who) {
case RUSAGE_SELF: case RUSAGE_SELF:
task_cputime(current, &utime, &stime); task_cputime(current, &utime, &stime);
jiffies_to_timeval32(utime, &r.ru_utime); utime_jiffies = cputime_to_jiffies(utime);
jiffies_to_timeval32(stime, &r.ru_stime); stime_jiffies = cputime_to_jiffies(stime);
jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
r.ru_minflt = current->min_flt; r.ru_minflt = current->min_flt;
r.ru_majflt = current->maj_flt; r.ru_majflt = current->maj_flt;
break; break;
case RUSAGE_CHILDREN: case RUSAGE_CHILDREN:
jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); utime_jiffies = cputime_to_jiffies(current->signal->cutime);
jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); stime_jiffies = cputime_to_jiffies(current->signal->cstime);
jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
r.ru_minflt = current->signal->cmin_flt; r.ru_minflt = current->signal->cmin_flt;
r.ru_majflt = current->signal->cmaj_flt; r.ru_majflt = current->signal->cmaj_flt;
break; break;
......
...@@ -919,7 +919,7 @@ static int apm_cpu_idle(struct cpuidle_device *dev, ...@@ -919,7 +919,7 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
} else if (jiffies_since_last_check > idle_period) { } else if (jiffies_since_last_check > idle_period) {
unsigned int idle_percentage; unsigned int idle_percentage;
idle_percentage = stime - last_stime; idle_percentage = cputime_to_jiffies(stime - last_stime);
idle_percentage *= 100; idle_percentage *= 100;
idle_percentage /= jiffies_since_last_check; idle_percentage /= jiffies_since_last_check;
use_apm_idle = (idle_percentage > idle_threshold); use_apm_idle = (idle_percentage > idle_threshold);
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/net_tstamp.h> #include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include <linux/tick.h>
#include <asm/checksum.h> #include <asm/checksum.h>
#include <asm/homecache.h> #include <asm/homecache.h>
...@@ -2273,7 +2274,8 @@ static int __init tile_net_init_module(void) ...@@ -2273,7 +2274,8 @@ static int __init tile_net_init_module(void)
tile_net_dev_init(name, mac); tile_net_dev_init(name, mac);
if (!network_cpus_init()) if (!network_cpus_init())
network_cpus_map = *cpu_online_mask; cpumask_and(&network_cpus_map, housekeeping_cpumask(),
cpu_online_mask);
return 0; return 0;
} }
......
...@@ -363,18 +363,11 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) ...@@ -363,18 +363,11 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
} }
extern unsigned long __usecs_to_jiffies(const unsigned int u); extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) #if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u) static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{ {
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
} }
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return u * (HZ / USEC_PER_SEC);
}
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
#else #else
static inline unsigned long _usecs_to_jiffies(const unsigned int u) static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{ {
......
...@@ -147,22 +147,29 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) ...@@ -147,22 +147,29 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask); cpumask_or(mask, mask, tick_nohz_full_mask);
} }
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void); extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu); extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void); extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk); extern void __tick_nohz_task_switch(void);
#else #else
static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { } static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { } static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { } static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } static inline void __tick_nohz_task_switch(void) { }
#endif #endif
static inline const struct cpumask *housekeeping_cpumask(void)
{
#ifdef CONFIG_NO_HZ_FULL
if (tick_nohz_full_enabled())
return housekeeping_mask;
#endif
return cpu_possible_mask;
}
static inline bool is_housekeeping_cpu(int cpu) static inline bool is_housekeeping_cpu(int cpu)
{ {
#ifdef CONFIG_NO_HZ_FULL #ifdef CONFIG_NO_HZ_FULL
...@@ -181,16 +188,10 @@ static inline void housekeeping_affine(struct task_struct *t) ...@@ -181,16 +188,10 @@ static inline void housekeeping_affine(struct task_struct *t)
#endif #endif
} }
static inline void tick_nohz_full_check(void) static inline void tick_nohz_task_switch(void)
{
if (tick_nohz_full_enabled())
__tick_nohz_full_check();
}
static inline void tick_nohz_task_switch(struct task_struct *tsk)
{ {
if (tick_nohz_full_enabled()) if (tick_nohz_full_enabled())
__tick_nohz_task_switch(tsk); __tick_nohz_task_switch();
} }
#endif #endif
...@@ -2543,7 +2543,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) ...@@ -2543,7 +2543,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
put_task_struct(prev); put_task_struct(prev);
} }
tick_nohz_task_switch(current); tick_nohz_task_switch();
return rq; return rq;
} }
......
...@@ -197,27 +197,9 @@ static bool can_stop_full_tick(void) ...@@ -197,27 +197,9 @@ static bool can_stop_full_tick(void)
return true; return true;
} }
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
/*
* Re-evaluate the need for the tick on the current CPU
* and restart it if necessary.
*/
void __tick_nohz_full_check(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_nohz_full_cpu(smp_processor_id())) {
if (ts->tick_stopped && !is_idle_task(current)) {
if (!can_stop_full_tick())
tick_nohz_restart_sched_tick(ts, ktime_get());
}
}
}
static void nohz_full_kick_work_func(struct irq_work *work) static void nohz_full_kick_work_func(struct irq_work *work)
{ {
__tick_nohz_full_check(); /* Empty, the tick restart happens on tick_nohz_irq_exit() */
} }
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
...@@ -252,7 +234,7 @@ void tick_nohz_full_kick_cpu(int cpu) ...@@ -252,7 +234,7 @@ void tick_nohz_full_kick_cpu(int cpu)
static void nohz_full_kick_ipi(void *info) static void nohz_full_kick_ipi(void *info)
{ {
__tick_nohz_full_check(); /* Empty, the tick restart happens on tick_nohz_irq_exit() */
} }
/* /*
...@@ -276,7 +258,7 @@ void tick_nohz_full_kick_all(void) ...@@ -276,7 +258,7 @@ void tick_nohz_full_kick_all(void)
* It might need the tick due to per task/process properties: * It might need the tick due to per task/process properties:
* perf events, posix cpu timers, ... * perf events, posix cpu timers, ...
*/ */
void __tick_nohz_task_switch(struct task_struct *tsk) void __tick_nohz_task_switch(void)
{ {
unsigned long flags; unsigned long flags;
...@@ -705,21 +687,38 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, ...@@ -705,21 +687,38 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
return tick; return tick;
} }
static void tick_nohz_full_stop_tick(struct tick_sched *ts) static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
tick_do_update_jiffies64(now);
update_cpu_load_nohz();
calc_load_exit_idle();
touch_softlockup_watchdog();
/*
* Cancel the scheduled timer and restore the tick
*/
ts->tick_stopped = 0;
ts->idle_exittime = now;
tick_nohz_restart(ts, now);
}
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{ {
#ifdef CONFIG_NO_HZ_FULL #ifdef CONFIG_NO_HZ_FULL
int cpu = smp_processor_id(); int cpu = smp_processor_id();
if (!tick_nohz_full_cpu(cpu) || is_idle_task(current)) if (!tick_nohz_full_cpu(cpu))
return; return;
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
return; return;
if (!can_stop_full_tick()) if (can_stop_full_tick())
return; tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
else if (ts->tick_stopped)
tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); tick_nohz_restart_sched_tick(ts, ktime_get());
#endif #endif
} }
...@@ -849,7 +848,7 @@ void tick_nohz_irq_exit(void) ...@@ -849,7 +848,7 @@ void tick_nohz_irq_exit(void)
if (ts->inidle) if (ts->inidle)
__tick_nohz_idle_enter(ts); __tick_nohz_idle_enter(ts);
else else
tick_nohz_full_stop_tick(ts); tick_nohz_full_update_tick(ts);
} }
/** /**
...@@ -864,23 +863,6 @@ ktime_t tick_nohz_get_sleep_length(void) ...@@ -864,23 +863,6 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length; return ts->sleep_length;
} }
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
tick_do_update_jiffies64(now);
update_cpu_load_nohz();
calc_load_exit_idle();
touch_softlockup_watchdog();
/*
* Cancel the scheduled timer and restore the tick
*/
ts->tick_stopped = 0;
ts->idle_exittime = now;
tick_nohz_restart(ts, now);
}
static void tick_nohz_account_idle_ticks(struct tick_sched *ts) static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{ {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
......
...@@ -268,10 +268,14 @@ EXPORT_SYMBOL(jiffies_to_msecs); ...@@ -268,10 +268,14 @@ EXPORT_SYMBOL(jiffies_to_msecs);
unsigned int jiffies_to_usecs(const unsigned long j) unsigned int jiffies_to_usecs(const unsigned long j)
{ {
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) /*
* Hz usually doesn't go much further MSEC_PER_SEC.
* jiffies_to_usecs() and usecs_to_jiffies() depend on that.
*/
BUILD_BUG_ON(HZ > USEC_PER_SEC);
#if !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j; return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else #else
# if BITS_PER_LONG == 32 # if BITS_PER_LONG == 32
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment