Commit d4e79615 authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Make schedstats a runtime tunable (disabled by default) and
     optimize it via static keys.

     As most distributions enable CONFIG_SCHEDSTATS=y due to its
     instrumentation value, this is a nice performance enhancement.
     (Mel Gorman)

   - Implement 'simple waitqueues' (swait): these are just pure
     waitqueues without any of the more complex features of full-blown
     waitqueues (callbacks, wake flags, wake keys, etc.).  Simple
     waitqueues have less memory overhead and are faster.

     Use simple waitqueues in the RCU code (in 4 different places) and
     for handling KVM vCPU wakeups.

     (Peter Zijlstra, Daniel Wagner, Thomas Gleixner, Paul Gortmaker,
     Marcelo Tosatti)

   - sched/numa enhancements (Rik van Riel)

   - NOHZ performance enhancements (Rik van Riel)

   - Various sched/deadline enhancements (Steven Rostedt)

   - Various fixes (Peter Zijlstra)

   - ... and a number of other fixes, cleanups and smaller enhancements"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  sched/cputime: Fix steal_account_process_tick() to always return jiffies
  sched/deadline: Remove dl_new from struct sched_dl_entity
  Revert "kbuild: Add option to turn incompatible pointer check into error"
  sched/deadline: Remove superfluous call to switched_to_dl()
  sched/debug: Fix preempt_disable_ip recording for preempt_disable()
  sched, time: Switch VIRT_CPU_ACCOUNTING_GEN to jiffy granularity
  time, acct: Drop irq save & restore from __acct_update_integrals()
  acct, time: Change indentation in __acct_update_integrals()
  sched, time: Remove non-power-of-two divides from __acct_update_integrals()
  sched/rt: Kick RT bandwidth timer immediately on start up
  sched/debug: Add deadline scheduler bandwidth ratio to /proc/sched_debug
  sched/debug: Move sched_domain_sysctl to debug.c
  sched/debug: Move the /sys/kernel/debug/sched_features file setup into debug.c
  sched/rt: Fix PI handling vs. sched_setscheduler()
  sched/core: Remove duplicated sched_group_set_shares() prototype
  sched/fair: Consolidate nohz CPU load update code
  sched/fair: Avoid using decay_load_missed() with a negative value
  sched/deadline: Always calculate end of period on sched_yield()
  sched/cgroup: Fix cgroup entity load tracking tear-down
  rcu: Use simple wait queues where possible in rcutree
  ...
parents d88bfe1d f9c904b7
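
The schedstats item in the summary above leans on the kernel's static-key machinery to keep the disabled case essentially free. A minimal sketch of that pattern follows (illustrative only: the key, counter and helper names below are assumptions, not identifiers from these patches):

    /* Sketch of the static-branch pattern used for a default-off stats path.
     * my_stat_enabled / my_wait_ns / my_account_wait are illustrative names. */
    #include <linux/jump_label.h>
    #include <linux/atomic.h>

    static DEFINE_STATIC_KEY_FALSE(my_stat_enabled);
    static atomic64_t my_wait_ns;

    static inline void my_account_wait(u64 delta_ns)
    {
            /* Compiles to a patched NOP branch; the accounting only runs
             * once the key has been switched on at runtime. */
            if (static_branch_unlikely(&my_stat_enabled))
                    atomic64_add(delta_ns, &my_wait_ns);
    }

    static void my_stat_set(bool enable)
    {
            if (enable)
                    static_branch_enable(&my_stat_enabled);
            else
                    static_branch_disable(&my_stat_enabled);
    }

static_branch_unlikely() stays a no-op until static_branch_enable() patches the branch, which is what makes a runtime-toggled, default-off statistics path cheap.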
@@ -3532,6 +3532,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	sched_debug	[KNL] Enables verbose scheduler debug messages.
 
+	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
+			Allowed values are enable and disable. This feature
+			incurs a small amount of overhead in the scheduler
+			but is useful for debugging and performance tuning.
+
 	skew_tick=	[KNL] Offset the periodic timer tick per cpu to mitigate
 			xtime_lock contention on larger systems, and/or RCU lock
 			contention on all systems with CONFIG_MAXSMP set.
...
@@ -773,6 +773,14 @@ rtsig-nr shows the number of RT signals currently queued.
 ==============================================================
 
+sched_schedstats:
+
+Enables/disables scheduler statistics. Enabling this feature
+incurs a small amount of overhead in the scheduler but is
+useful for debugging and performance tuning.
+
+==============================================================
+
 sg-big-buff:
 
 This file shows the size of the generic SCSI (sg) buffer.
...
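
The sched_schedstats entry documented above is a regular /proc/sys/kernel sysctl, so it can be flipped at runtime. A minimal userspace sketch (assuming root privileges and a kernel built with CONFIG_SCHEDSTATS=y):

    /* Toggle the sysctl documented above from userspace. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/sched_schedstats", "w");

            if (!f) {
                    perror("sched_schedstats");
                    return 1;
            }
            fputs("1\n", f);        /* "1" enables, "0" disables */
            return fclose(f) ? 1 : 0;
    }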
@@ -506,18 +506,18 @@ static void kvm_arm_resume_guest(struct kvm *kvm)
     struct kvm_vcpu *vcpu;
 
     kvm_for_each_vcpu(i, vcpu, kvm) {
-        wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+        struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
         vcpu->arch.pause = false;
-        wake_up_interruptible(wq);
+        swake_up(wq);
     }
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
 {
-    wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+    struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
-    wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
-                                   (!vcpu->arch.pause)));
+    swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+                                   (!vcpu->arch.pause)));
 }
...
@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
     struct kvm *kvm = source_vcpu->kvm;
     struct kvm_vcpu *vcpu = NULL;
-    wait_queue_head_t *wq;
+    struct swait_queue_head *wq;
     unsigned long cpu_id;
     unsigned long context_id;
     phys_addr_t target_pc;
@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
     smp_mb();        /* Make sure the above is visible */
 
     wq = kvm_arch_vcpu_wq(vcpu);
-    wake_up_interruptible(wq);
+    swake_up(wq);
 
     return PSCI_RET_SUCCESS;
 }
...
@@ -445,8 +445,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 
     dvcpu->arch.wait = 0;
 
-    if (waitqueue_active(&dvcpu->wq))
-        wake_up_interruptible(&dvcpu->wq);
+    if (swait_active(&dvcpu->wq))
+        swake_up(&dvcpu->wq);
 
     return 0;
 }
@@ -1174,8 +1174,8 @@ static void kvm_mips_comparecount_func(unsigned long data)
     kvm_mips_callbacks->queue_timer_int(vcpu);
 
     vcpu->arch.wait = 0;
-    if (waitqueue_active(&vcpu->wq))
-        wake_up_interruptible(&vcpu->wq);
+    if (swait_active(&vcpu->wq))
+        swake_up(&vcpu->wq);
 }
 
 /* low level hrtimer wake routine */
...
@@ -289,7 +289,7 @@ struct kvmppc_vcore {
     struct list_head runnable_threads;
     struct list_head preempt_list;
     spinlock_t lock;
-    wait_queue_head_t wq;
+    struct swait_queue_head wq;
     spinlock_t stoltb_lock;    /* protects stolen_tb and preempt_tb */
     u64 stolen_tb;
     u64 preempt_tb;
@@ -629,7 +629,7 @@ struct kvm_vcpu_arch {
     u8 prodded;
     u32 last_inst;
 
-    wait_queue_head_t *wqp;
+    struct swait_queue_head *wqp;
     struct kvmppc_vcore *vcore;
     int ret;
     int trap;
...
@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
     int cpu;
-    wait_queue_head_t *wqp;
+    struct swait_queue_head *wqp;
 
     wqp = kvm_arch_vcpu_wq(vcpu);
-    if (waitqueue_active(wqp)) {
-        wake_up_interruptible(wqp);
+    if (swait_active(wqp)) {
+        swake_up(wqp);
         ++vcpu->stat.halt_wakeup;
     }
@@ -701,8 +701,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
         tvcpu->arch.prodded = 1;
         smp_mb();
         if (vcpu->arch.ceded) {
-            if (waitqueue_active(&vcpu->wq)) {
-                wake_up_interruptible(&vcpu->wq);
+            if (swait_active(&vcpu->wq)) {
+                swake_up(&vcpu->wq);
                 vcpu->stat.halt_wakeup++;
             }
         }
@@ -1459,7 +1459,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
     INIT_LIST_HEAD(&vcore->runnable_threads);
     spin_lock_init(&vcore->lock);
     spin_lock_init(&vcore->stoltb_lock);
-    init_waitqueue_head(&vcore->wq);
+    init_swait_queue_head(&vcore->wq);
     vcore->preempt_tb = TB_NIL;
     vcore->lpcr = kvm->arch.lpcr;
     vcore->first_vcpuid = core * threads_per_subcore;
@@ -2531,10 +2531,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
     struct kvm_vcpu *vcpu;
     int do_sleep = 1;
+    DECLARE_SWAITQUEUE(wait);
 
-    DEFINE_WAIT(wait);
-
-    prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+    prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 
     /*
      * Check one last time for pending exceptions and ceded state after
@@ -2548,7 +2547,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
     }
 
     if (!do_sleep) {
-        finish_wait(&vc->wq, &wait);
+        finish_swait(&vc->wq, &wait);
         return;
     }
@@ -2556,7 +2555,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
     trace_kvmppc_vcore_blocked(vc, 0);
     spin_unlock(&vc->lock);
     schedule();
-    finish_wait(&vc->wq, &wait);
+    finish_swait(&vc->wq, &wait);
     spin_lock(&vc->lock);
     vc->vcore_state = VCORE_INACTIVE;
     trace_kvmppc_vcore_blocked(vc, 1);
@@ -2612,7 +2611,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
             kvmppc_start_thread(vcpu, vc);
             trace_kvm_guest_enter(vcpu);
         } else if (vc->vcore_state == VCORE_SLEEPING) {
-            wake_up(&vc->wq);
+            swake_up(&vc->wq);
         }
 
     }
...
@@ -467,7 +467,7 @@ struct kvm_s390_irq_payload {
 struct kvm_s390_local_interrupt {
     spinlock_t lock;
     struct kvm_s390_float_interrupt *float_int;
-    wait_queue_head_t *wq;
+    struct swait_queue_head *wq;
     atomic_t *cpuflags;
     DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
     struct kvm_s390_irq_payload irq;
...
@@ -966,13 +966,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-    if (waitqueue_active(&vcpu->wq)) {
+    if (swait_active(&vcpu->wq)) {
         /*
          * The vcpu gave up the cpu voluntarily, mark it as a good
          * yield-candidate.
          */
         vcpu->preempted = true;
-        wake_up_interruptible(&vcpu->wq);
+        swake_up(&vcpu->wq);
         vcpu->stat.halt_wakeup++;
     }
 }
...
@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
 static void apic_timer_expired(struct kvm_lapic *apic)
 {
     struct kvm_vcpu *vcpu = apic->vcpu;
-    wait_queue_head_t *q = &vcpu->wq;
+    struct swait_queue_head *q = &vcpu->wq;
     struct kvm_timer *ktimer = &apic->lapic_timer;
 
     if (atomic_read(&apic->lapic_timer.pending))
@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kvm_lapic *apic)
     atomic_inc(&apic->lapic_timer.pending);
     kvm_set_pending_timer(vcpu);
 
-    if (waitqueue_active(q))
-        wake_up_interruptible(q);
+    if (swait_active(q))
+        swake_up(q);
 
     if (apic_lvtt_tscdeadline(apic))
         ktimer->expired_tscdeadline = ktimer->tscdeadline;
...
@@ -713,6 +713,18 @@ static inline void __ftrace_enabled_restore(int enabled)
 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
 
+static inline unsigned long get_lock_parent_ip(void)
+{
+    unsigned long addr = CALLER_ADDR0;
+
+    if (!in_lock_functions(addr))
+        return addr;
+    addr = CALLER_ADDR1;
+    if (!in_lock_functions(addr))
+        return addr;
+    return CALLER_ADDR2;
+}
+
 #ifdef CONFIG_IRQSOFF_TRACER
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
...
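
get_lock_parent_ip() above walks up to two frames so that a recorded instruction pointer names the code that took the lock rather than the lock primitive itself, which is what the "Fix preempt_disable_ip recording" commit in the shortlog needs. A hedged sketch of a caller (the helper below is illustrative; only get_lock_parent_ip() and the CALLER_ADDRx machinery come from the header):

    /* Illustrative caller: report where preemption was really disabled,
     * skipping spin_lock()/read_lock()/... frames. */
    static inline void report_preempt_off_site(void)
    {
            pr_debug("preemption disabled at %pS\n",
                     (void *)get_lock_parent_ip());
    }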
@@ -25,6 +25,7 @@
 #include <linux/irqflags.h>
 #include <linux/context_tracking.h>
 #include <linux/irqbypass.h>
+#include <linux/swait.h>
 
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -218,7 +219,7 @@ struct kvm_vcpu {
     int fpu_active;
     int guest_fpu_loaded, guest_xcr0_loaded;
     unsigned char fpu_counter;
-    wait_queue_head_t wq;
+    struct swait_queue_head wq;
     struct pid *pid;
     int sigset_active;
     sigset_t sigset;
@@ -782,7 +783,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
 }
 #endif
 
-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
     return vcpu->arch.wqp;
...
@@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 
 void clear_all_latency_tracing(struct task_struct *p);
 
+extern int sysctl_latencytop(struct ctl_table *table, int write,
+                             void __user *buffer, size_t *lenp, loff_t *ppos);
+
 #else
 
 static inline void
...
@@ -182,8 +182,6 @@ extern void update_cpu_load_nohz(int active);
 static inline void update_cpu_load_nohz(int active) { }
 #endif
 
-extern unsigned long get_parent_ip(unsigned long addr);
-
 extern void dump_cpu_task(int cpu);
 
 struct seq_file;
@@ -920,6 +918,10 @@ static inline int sched_info_on(void)
 #endif
 }
 
+#ifdef CONFIG_SCHEDSTATS
+void force_schedstat_enabled(void);
+#endif
+
 enum cpu_idle_type {
     CPU_IDLE,
     CPU_NOT_IDLE,
@@ -1289,6 +1291,8 @@ struct sched_rt_entity {
     unsigned long timeout;
     unsigned long watchdog_stamp;
     unsigned int time_slice;
+    unsigned short on_rq;
+    unsigned short on_list;
 
     struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1329,10 +1333,6 @@ struct sched_dl_entity {
      * task has to wait for a replenishment to be performed at the
      * next firing of dl_timer.
      *
-     * @dl_new tells if a new instance arrived. If so we must
-     * start executing it with full runtime and reset its absolute
-     * deadline;
-     *
      * @dl_boosted tells if we are boosted due to DI. If so we are
      * outside bandwidth enforcement mechanism (but only until we
      * exit the critical section);
@@ -1340,7 +1340,7 @@ struct sched_dl_entity {
      * @dl_yielded tells if task gave up the cpu before consuming
      * all its available runtime during the last job.
      */
-    int dl_throttled, dl_new, dl_boosted, dl_yielded;
+    int dl_throttled, dl_boosted, dl_yielded;
 
     /*
      * Bandwidth enforcement timer. Each -deadline task has its
...
@@ -95,4 +95,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos);
 
+extern int sysctl_schedstats(struct ctl_table *table, int write,
+                             void __user *buffer, size_t *lenp,
+                             loff_t *ppos);
+
 #endif /* _SCHED_SYSCTL_H */
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
/*
* Simple wait queues
*
* While these are very similar to the other/complex wait queues (wait.h) the
* most important difference is that the simple waitqueue allows for
* deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
* times.
*
* In order to make this so, we had to drop a fair number of features of the
* other waitqueue code; notably:
*
* - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
* sleeper state.
*
* - the exclusive mode; because this requires preserving the list order
* and this is hard.
*
* - custom wake functions; because you cannot give any guarantees about
* random code.
*
* As a side effect of this; the data structures are slimmer.
*
* One would recommend using this wait queue where possible.
*/
struct task_struct;
struct swait_queue_head {
raw_spinlock_t lock;
struct list_head task_list;
};
struct swait_queue {
struct task_struct *task;
struct list_head task_list;
};
#define __SWAITQUEUE_INITIALIZER(name) { \
.task = current, \
.task_list = LIST_HEAD_INIT((name).task_list), \
}
#define DECLARE_SWAITQUEUE(name) \
struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.task_list = LIST_HEAD_INIT((name).task_list), \
}
#define DECLARE_SWAIT_QUEUE_HEAD(name) \
struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
struct lock_class_key *key);
#define init_swait_queue_head(q) \
do { \
static struct lock_class_key __key; \
__init_swait_queue_head((q), #q, &__key); \
} while (0)
#ifdef CONFIG_LOCKDEP
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
DECLARE_SWAIT_QUEUE_HEAD(name)
#endif
static inline int swait_active(struct swait_queue_head *q)
{
return !list_empty(&q->task_list);
}
extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
#define ___swait_event(wq, condition, state, ret, cmd) \
({ \
struct swait_queue __wait; \
long __ret = ret; \
\
INIT_LIST_HEAD(&__wait.task_list); \
for (;;) { \
long __int = prepare_to_swait_event(&wq, &__wait, state);\
\
if (condition) \
break; \
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
break; \
} \
\
cmd; \
} \
finish_swait(&wq, &__wait); \
__ret; \
})
#define __swait_event(wq, condition) \
(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
schedule())
#define swait_event(wq, condition) \
do { \
if (condition) \
break; \
__swait_event(wq, condition); \
} while (0)
#define __swait_event_timeout(wq, condition, timeout) \
___swait_event(wq, ___wait_cond_timeout(condition), \
TASK_UNINTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
#define swait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
__ret = __swait_event_timeout(wq, condition, timeout); \
__ret; \
})
#define __swait_event_interruptible(wq, condition) \
___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
schedule())
#define swait_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
__ret = __swait_event_interruptible(wq, condition); \
__ret; \
})
#define __swait_event_interruptible_timeout(wq, condition, timeout) \
___swait_event(wq, ___wait_cond_timeout(condition), \
TASK_INTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
#define swait_event_interruptible_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
__ret = __swait_event_interruptible_timeout(wq, \
condition, timeout); \
__ret; \
})
#endif /* _LINUX_SWAIT_H */
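
A minimal usage sketch of the API declared above (illustrative: the queue head, the "done" flag and the two functions are assumptions; only the swait calls themselves come from the header):

    /* One waiter sleeps until a flag is set; the waker sets it and wakes. */
    static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
    static bool my_done;

    static int my_waiter(void *unused)
    {
            /* Sleeps in TASK_INTERRUPTIBLE until my_done becomes true. */
            return swait_event_interruptible(my_wq, READ_ONCE(my_done));
    }

    static void my_waker(void)
    {
            WRITE_ONCE(my_done, true);
            swake_up(&my_wq);       /* wakes a single waiter */
    }

swake_up() wakes one waiter with bounded work under the raw lock, which is the deterministic behaviour the RCU and KVM conversions in this merge are after.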
@@ -338,7 +338,7 @@ do {                                                                       \
                 schedule(); try_to_freeze())
 
 /**
- * wait_event - sleep (or freeze) until a condition gets true
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
...
@@ -47,12 +47,12 @@
  * of times)
  */
 
+#include <linux/latencytop.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/proc_fs.h>
-#include <linux/latencytop.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/list.h>
@@ -289,4 +289,16 @@ static int __init init_lstats_procfs(void)
     proc_create("latency_stats", 0644, NULL, &lstats_fops);
     return 0;
 }
+
+int sysctl_latencytop(struct ctl_table *table, int write,
+                      void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    int err;
+
+    err = proc_dointvec(table, write, buffer, lenp, ppos);
+    if (latencytop_enabled)
+        force_schedstat_enabled();
+
+    return err;
+}
 device_initcall(init_lstats_procfs);
@@ -59,6 +59,7 @@ int profile_setup(char *str)
 
     if (!strncmp(str, sleepstr, strlen(sleepstr))) {
 #ifdef CONFIG_SCHEDSTATS
+        force_schedstat_enabled();
         prof_on = SLEEP_PROFILING;
         if (str[strlen(sleepstr)] == ',')
             str += strlen(sleepstr) + 1;
...
...@@ -1614,7 +1614,6 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) ...@@ -1614,7 +1614,6 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
int needmore; int needmore;
struct rcu_data *rdp = this_cpu_ptr(rsp->rda); struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
rcu_nocb_gp_cleanup(rsp, rnp);
rnp->need_future_gp[c & 0x1] = 0; rnp->need_future_gp[c & 0x1] = 0;
needmore = rnp->need_future_gp[(c + 1) & 0x1]; needmore = rnp->need_future_gp[(c + 1) & 0x1];
trace_rcu_future_gp(rnp, rdp, c, trace_rcu_future_gp(rnp, rdp, c,
...@@ -1635,7 +1634,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp) ...@@ -1635,7 +1634,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
!READ_ONCE(rsp->gp_flags) || !READ_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread) !rsp->gp_kthread)
return; return;
wake_up(&rsp->gp_wq); swake_up(&rsp->gp_wq);
} }
/* /*
...@@ -2010,6 +2009,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) ...@@ -2010,6 +2009,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
int nocb = 0; int nocb = 0;
struct rcu_data *rdp; struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp); struct rcu_node *rnp = rcu_get_root(rsp);
struct swait_queue_head *sq;
WRITE_ONCE(rsp->gp_activity, jiffies); WRITE_ONCE(rsp->gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp); raw_spin_lock_irq_rcu_node(rnp);
...@@ -2046,7 +2046,9 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) ...@@ -2046,7 +2046,9 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
/* smp_mb() provided by prior unlock-lock pair. */ /* smp_mb() provided by prior unlock-lock pair. */
nocb += rcu_future_gp_cleanup(rsp, rnp); nocb += rcu_future_gp_cleanup(rsp, rnp);
sq = rcu_nocb_gp_get(rnp);
raw_spin_unlock_irq(&rnp->lock); raw_spin_unlock_irq(&rnp->lock);
rcu_nocb_gp_cleanup(sq);
cond_resched_rcu_qs(); cond_resched_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies); WRITE_ONCE(rsp->gp_activity, jiffies);
rcu_gp_slow(rsp, gp_cleanup_delay); rcu_gp_slow(rsp, gp_cleanup_delay);
...@@ -2092,7 +2094,7 @@ static int __noreturn rcu_gp_kthread(void *arg) ...@@ -2092,7 +2094,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
READ_ONCE(rsp->gpnum), READ_ONCE(rsp->gpnum),
TPS("reqwait")); TPS("reqwait"));
rsp->gp_state = RCU_GP_WAIT_GPS; rsp->gp_state = RCU_GP_WAIT_GPS;
wait_event_interruptible(rsp->gp_wq, swait_event_interruptible(rsp->gp_wq,
READ_ONCE(rsp->gp_flags) & READ_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT); RCU_GP_FLAG_INIT);
rsp->gp_state = RCU_GP_DONE_GPS; rsp->gp_state = RCU_GP_DONE_GPS;
...@@ -2122,7 +2124,7 @@ static int __noreturn rcu_gp_kthread(void *arg) ...@@ -2122,7 +2124,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
READ_ONCE(rsp->gpnum), READ_ONCE(rsp->gpnum),
TPS("fqswait")); TPS("fqswait"));
rsp->gp_state = RCU_GP_WAIT_FQS; rsp->gp_state = RCU_GP_WAIT_FQS;
ret = wait_event_interruptible_timeout(rsp->gp_wq, ret = swait_event_interruptible_timeout(rsp->gp_wq,
rcu_gp_fqs_check_wake(rsp, &gf), j); rcu_gp_fqs_check_wake(rsp, &gf), j);
rsp->gp_state = RCU_GP_DOING_FQS; rsp->gp_state = RCU_GP_DOING_FQS;
/* Locking provides needed memory barriers. */ /* Locking provides needed memory barriers. */
...@@ -2246,7 +2248,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) ...@@ -2246,7 +2248,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
rcu_gp_kthread_wake(rsp); swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
} }
/* /*
...@@ -2900,7 +2902,7 @@ static void force_quiescent_state(struct rcu_state *rsp) ...@@ -2900,7 +2902,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
} }
WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
raw_spin_unlock_irqrestore(&rnp_old->lock, flags); raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
rcu_gp_kthread_wake(rsp); swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */
} }
/* /*
...@@ -3529,7 +3531,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, ...@@ -3529,7 +3531,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
raw_spin_unlock_irqrestore(&rnp->lock, flags); raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (wake) { if (wake) {
smp_mb(); /* EGP done before wake_up(). */ smp_mb(); /* EGP done before wake_up(). */
wake_up(&rsp->expedited_wq); swake_up(&rsp->expedited_wq);
} }
break; break;
} }
...@@ -3780,7 +3782,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) ...@@ -3780,7 +3782,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
jiffies_start = jiffies; jiffies_start = jiffies;
for (;;) { for (;;) {
ret = wait_event_interruptible_timeout( ret = swait_event_timeout(
rsp->expedited_wq, rsp->expedited_wq,
sync_rcu_preempt_exp_done(rnp_root), sync_rcu_preempt_exp_done(rnp_root),
jiffies_stall); jiffies_stall);
...@@ -3788,7 +3790,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) ...@@ -3788,7 +3790,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
return; return;
if (ret < 0) { if (ret < 0) {
/* Hit a signal, disable CPU stall warnings. */ /* Hit a signal, disable CPU stall warnings. */
wait_event(rsp->expedited_wq, swait_event(rsp->expedited_wq,
sync_rcu_preempt_exp_done(rnp_root)); sync_rcu_preempt_exp_done(rnp_root));
return; return;
} }
...@@ -4482,8 +4484,8 @@ static void __init rcu_init_one(struct rcu_state *rsp) ...@@ -4482,8 +4484,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
} }
} }
init_waitqueue_head(&rsp->gp_wq); init_swait_queue_head(&rsp->gp_wq);
init_waitqueue_head(&rsp->expedited_wq); init_swait_queue_head(&rsp->expedited_wq);
rnp = rsp->level[rcu_num_lvls - 1]; rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
while (i > rnp->grphi) while (i > rnp->grphi)
...
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/seqlock.h> #include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h> #include <linux/stop_machine.h>
/* /*
...@@ -243,7 +244,7 @@ struct rcu_node { ...@@ -243,7 +244,7 @@ struct rcu_node {
/* Refused to boost: not sure why, though. */ /* Refused to boost: not sure why, though. */
/* This can happen due to race conditions. */ /* This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU #ifdef CONFIG_RCU_NOCB_CPU
wait_queue_head_t nocb_gp_wq[2]; struct swait_queue_head nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */ /* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
int need_future_gp[2]; int need_future_gp[2];
...@@ -399,7 +400,7 @@ struct rcu_data { ...@@ -399,7 +400,7 @@ struct rcu_data {
atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
struct rcu_head **nocb_follower_tail; struct rcu_head **nocb_follower_tail;
wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
struct task_struct *nocb_kthread; struct task_struct *nocb_kthread;
int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
...@@ -478,7 +479,7 @@ struct rcu_state { ...@@ -478,7 +479,7 @@ struct rcu_state {
unsigned long gpnum; /* Current gp number. */ unsigned long gpnum; /* Current gp number. */
unsigned long completed; /* # of last completed gp. */ unsigned long completed; /* # of last completed gp. */
struct task_struct *gp_kthread; /* Task for grace periods. */ struct task_struct *gp_kthread; /* Task for grace periods. */
wait_queue_head_t gp_wq; /* Where GP task waits. */ struct swait_queue_head gp_wq; /* Where GP task waits. */
short gp_flags; /* Commands for GP task. */ short gp_flags; /* Commands for GP task. */
short gp_state; /* GP kthread sleep state. */ short gp_state; /* GP kthread sleep state. */
...@@ -506,7 +507,7 @@ struct rcu_state { ...@@ -506,7 +507,7 @@ struct rcu_state {
unsigned long expedited_sequence; /* Take a ticket. */ unsigned long expedited_sequence; /* Take a ticket. */
atomic_long_t expedited_normal; /* # fallbacks to normal. */ atomic_long_t expedited_normal; /* # fallbacks to normal. */
atomic_t expedited_need_qs; /* # CPUs left to check in. */ atomic_t expedited_need_qs; /* # CPUs left to check in. */
wait_queue_head_t expedited_wq; /* Wait for check-ins. */ struct swait_queue_head expedited_wq; /* Wait for check-ins. */
int ncpus_snap; /* # CPUs seen last time. */ int ncpus_snap; /* # CPUs seen last time. */
unsigned long jiffies_force_qs; /* Time at which to invoke */ unsigned long jiffies_force_qs; /* Time at which to invoke */
...@@ -621,7 +622,8 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp); ...@@ -621,7 +622,8 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void); static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq); static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp); static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp); static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy, unsigned long flags); bool lazy, unsigned long flags);
...
...@@ -1811,9 +1811,9 @@ early_param("rcu_nocb_poll", parse_rcu_nocb_poll); ...@@ -1811,9 +1811,9 @@ early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
* Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
* grace period. * grace period.
*/ */
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{ {
wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); swake_up_all(sq);
} }
/* /*
...@@ -1829,10 +1829,15 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) ...@@ -1829,10 +1829,15 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
} }
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
return &rnp->nocb_gp_wq[rnp->completed & 0x1];
}
static void rcu_init_one_nocb(struct rcu_node *rnp) static void rcu_init_one_nocb(struct rcu_node *rnp)
{ {
init_waitqueue_head(&rnp->nocb_gp_wq[0]); init_swait_queue_head(&rnp->nocb_gp_wq[0]);
init_waitqueue_head(&rnp->nocb_gp_wq[1]); init_swait_queue_head(&rnp->nocb_gp_wq[1]);
} }
#ifndef CONFIG_RCU_NOCB_CPU_ALL #ifndef CONFIG_RCU_NOCB_CPU_ALL
...@@ -1857,7 +1862,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) ...@@ -1857,7 +1862,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
/* Prior smp_mb__after_atomic() orders against prior enqueue. */ /* Prior smp_mb__after_atomic() orders against prior enqueue. */
WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
wake_up(&rdp_leader->nocb_wq); swake_up(&rdp_leader->nocb_wq);
} }
} }
...@@ -2069,7 +2074,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) ...@@ -2069,7 +2074,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
*/ */
trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) { for (;;) {
wait_event_interruptible( swait_event_interruptible(
rnp->nocb_gp_wq[c & 0x1], rnp->nocb_gp_wq[c & 0x1],
(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
if (likely(d)) if (likely(d))
...@@ -2097,7 +2102,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp) ...@@ -2097,7 +2102,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
/* Wait for callbacks to appear. */ /* Wait for callbacks to appear. */
if (!rcu_nocb_poll) { if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
wait_event_interruptible(my_rdp->nocb_wq, swait_event_interruptible(my_rdp->nocb_wq,
!READ_ONCE(my_rdp->nocb_leader_sleep)); !READ_ONCE(my_rdp->nocb_leader_sleep));
/* Memory barrier handled by smp_mb() calls below and repoll. */ /* Memory barrier handled by smp_mb() calls below and repoll. */
} else if (firsttime) { } else if (firsttime) {
...@@ -2172,7 +2177,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp) ...@@ -2172,7 +2177,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
* List was empty, wake up the follower. * List was empty, wake up the follower.
* Memory barriers supplied by atomic_long_add(). * Memory barriers supplied by atomic_long_add().
*/ */
wake_up(&rdp->nocb_wq); swake_up(&rdp->nocb_wq);
} }
} }
...@@ -2193,7 +2198,7 @@ static void nocb_follower_wait(struct rcu_data *rdp) ...@@ -2193,7 +2198,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
if (!rcu_nocb_poll) { if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
"FollowerSleep"); "FollowerSleep");
wait_event_interruptible(rdp->nocb_wq, swait_event_interruptible(rdp->nocb_wq,
READ_ONCE(rdp->nocb_follower_head)); READ_ONCE(rdp->nocb_follower_head));
} else if (firsttime) { } else if (firsttime) {
/* Don't drown trace log with "Poll"! */ /* Don't drown trace log with "Poll"! */
...@@ -2352,7 +2357,7 @@ void __init rcu_init_nohz(void) ...@@ -2352,7 +2357,7 @@ void __init rcu_init_nohz(void)
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{ {
rdp->nocb_tail = &rdp->nocb_head; rdp->nocb_tail = &rdp->nocb_head;
init_waitqueue_head(&rdp->nocb_wq); init_swait_queue_head(&rdp->nocb_wq);
rdp->nocb_follower_tail = &rdp->nocb_follower_head; rdp->nocb_follower_tail = &rdp->nocb_follower_head;
} }
...@@ -2502,7 +2507,7 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) ...@@ -2502,7 +2507,7 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
return false; return false;
} }
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{ {
} }
...@@ -2510,6 +2515,11 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) ...@@ -2510,6 +2515,11 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{ {
} }
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
return NULL;
}
static void rcu_init_one_nocb(struct rcu_node *rnp) static void rcu_init_one_nocb(struct rcu_node *rnp)
{ {
} }
...
@@ -13,7 +13,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o idle.o
+obj-y += wait.o swait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
...
...@@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void) ...@@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void)
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
if (static_key_false(&paravirt_steal_enabled)) { if (static_key_false(&paravirt_steal_enabled)) {
u64 steal; u64 steal;
cputime_t steal_ct; unsigned long steal_jiffies;
steal = paravirt_steal_clock(smp_processor_id()); steal = paravirt_steal_clock(smp_processor_id());
steal -= this_rq()->prev_steal_time; steal -= this_rq()->prev_steal_time;
/* /*
* cputime_t may be less precise than nsecs (eg: if it's * steal is in nsecs but our caller is expecting steal
* based on jiffies). Lets cast the result to cputime * time in jiffies. Lets cast the result to jiffies
* granularity and account the rest on the next rounds. * granularity and account the rest on the next rounds.
*/ */
steal_ct = nsecs_to_cputime(steal); steal_jiffies = nsecs_to_jiffies(steal);
this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct); this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
account_steal_time(steal_ct); account_steal_time(jiffies_to_cputime(steal_jiffies));
return steal_ct; return steal_jiffies;
} }
#endif #endif
return false; return false;
...@@ -668,26 +668,25 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime ...@@ -668,26 +668,25 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk) static cputime_t vtime_delta(struct task_struct *tsk)
{ {
unsigned long long clock; unsigned long now = READ_ONCE(jiffies);
clock = local_clock(); if (time_before(now, (unsigned long)tsk->vtime_snap))
if (clock < tsk->vtime_snap)
return 0; return 0;
return clock - tsk->vtime_snap; return jiffies_to_cputime(now - tsk->vtime_snap);
} }
static cputime_t get_vtime_delta(struct task_struct *tsk) static cputime_t get_vtime_delta(struct task_struct *tsk)
{ {
unsigned long long delta = vtime_delta(tsk); unsigned long now = READ_ONCE(jiffies);
unsigned long delta = now - tsk->vtime_snap;
WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
tsk->vtime_snap += delta; tsk->vtime_snap = now;
/* CHECKME: always safe to convert nsecs to cputime? */ return jiffies_to_cputime(delta);
return nsecs_to_cputime(delta);
} }
static void __vtime_account_system(struct task_struct *tsk) static void __vtime_account_system(struct task_struct *tsk)
...@@ -699,6 +698,9 @@ static void __vtime_account_system(struct task_struct *tsk) ...@@ -699,6 +698,9 @@ static void __vtime_account_system(struct task_struct *tsk)
void vtime_account_system(struct task_struct *tsk) void vtime_account_system(struct task_struct *tsk)
{ {
if (!vtime_delta(tsk))
return;
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk); __vtime_account_system(tsk);
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&tsk->vtime_seqcount);
...@@ -707,7 +709,8 @@ void vtime_account_system(struct task_struct *tsk) ...@@ -707,7 +709,8 @@ void vtime_account_system(struct task_struct *tsk)
void vtime_gen_account_irq_exit(struct task_struct *tsk) void vtime_gen_account_irq_exit(struct task_struct *tsk)
{ {
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk); if (vtime_delta(tsk))
__vtime_account_system(tsk);
if (context_tracking_in_user()) if (context_tracking_in_user())
tsk->vtime_snap_whence = VTIME_USER; tsk->vtime_snap_whence = VTIME_USER;
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&tsk->vtime_seqcount);
...@@ -718,16 +721,19 @@ void vtime_account_user(struct task_struct *tsk) ...@@ -718,16 +721,19 @@ void vtime_account_user(struct task_struct *tsk)
cputime_t delta_cpu; cputime_t delta_cpu;
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&tsk->vtime_seqcount);
delta_cpu = get_vtime_delta(tsk);
tsk->vtime_snap_whence = VTIME_SYS; tsk->vtime_snap_whence = VTIME_SYS;
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); if (vtime_delta(tsk)) {
delta_cpu = get_vtime_delta(tsk);
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
}
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&tsk->vtime_seqcount);
} }
void vtime_user_enter(struct task_struct *tsk) void vtime_user_enter(struct task_struct *tsk)
{ {
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk); if (vtime_delta(tsk))
__vtime_account_system(tsk);
tsk->vtime_snap_whence = VTIME_USER; tsk->vtime_snap_whence = VTIME_USER;
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&tsk->vtime_seqcount);
} }
...@@ -742,7 +748,8 @@ void vtime_guest_enter(struct task_struct *tsk) ...@@ -742,7 +748,8 @@ void vtime_guest_enter(struct task_struct *tsk)
* that can thus safely catch up with a tickless delta. * that can thus safely catch up with a tickless delta.
*/ */
write_seqcount_begin(&tsk->vtime_seqcount); write_seqcount_begin(&tsk->vtime_seqcount);
__vtime_account_system(tsk); if (vtime_delta(tsk))
__vtime_account_system(tsk);
current->flags |= PF_VCPU; current->flags |= PF_VCPU;
write_seqcount_end(&tsk->vtime_seqcount); write_seqcount_end(&tsk->vtime_seqcount);
} }
...@@ -772,7 +779,7 @@ void arch_vtime_task_switch(struct task_struct *prev) ...@@ -772,7 +779,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
write_seqcount_begin(&current->vtime_seqcount); write_seqcount_begin(&current->vtime_seqcount);
current->vtime_snap_whence = VTIME_SYS; current->vtime_snap_whence = VTIME_SYS;
current->vtime_snap = sched_clock_cpu(smp_processor_id()); current->vtime_snap = jiffies;
write_seqcount_end(&current->vtime_seqcount); write_seqcount_end(&current->vtime_seqcount);
} }
...@@ -783,7 +790,7 @@ void vtime_init_idle(struct task_struct *t, int cpu) ...@@ -783,7 +790,7 @@ void vtime_init_idle(struct task_struct *t, int cpu)
local_irq_save(flags); local_irq_save(flags);
write_seqcount_begin(&t->vtime_seqcount); write_seqcount_begin(&t->vtime_seqcount);
t->vtime_snap_whence = VTIME_SYS; t->vtime_snap_whence = VTIME_SYS;
t->vtime_snap = sched_clock_cpu(cpu); t->vtime_snap = jiffies;
write_seqcount_end(&t->vtime_seqcount); write_seqcount_end(&t->vtime_seqcount);
local_irq_restore(flags); local_irq_restore(flags);
} }
...
...@@ -352,7 +352,15 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se, ...@@ -352,7 +352,15 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq); struct rq *rq = rq_of_dl_rq(dl_rq);
WARN_ON(!dl_se->dl_new || dl_se->dl_throttled); WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
/*
* We are racing with the deadline timer. So, do nothing because
* the deadline timer handler will take care of properly recharging
* the runtime and postponing the deadline
*/
if (dl_se->dl_throttled)
return;
/* /*
* We use the regular wall clock time to set deadlines in the * We use the regular wall clock time to set deadlines in the
...@@ -361,7 +369,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se, ...@@ -361,7 +369,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
*/ */
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime; dl_se->runtime = pi_se->dl_runtime;
dl_se->dl_new = 0;
} }
/* /*
...@@ -399,6 +406,9 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, ...@@ -399,6 +406,9 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
dl_se->runtime = pi_se->dl_runtime; dl_se->runtime = pi_se->dl_runtime;
} }
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
/* /*
* We keep moving the deadline away until we get some * We keep moving the deadline away until we get some
* available runtime for the entity. This ensures correct * available runtime for the entity. This ensures correct
...@@ -500,15 +510,6 @@ static void update_dl_entity(struct sched_dl_entity *dl_se, ...@@ -500,15 +510,6 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq); struct rq *rq = rq_of_dl_rq(dl_rq);
/*
* The arrival of a new instance needs special treatment, i.e.,
* the actual scheduling parameters have to be "renewed".
*/
if (dl_se->dl_new) {
setup_new_dl_entity(dl_se, pi_se);
return;
}
if (dl_time_before(dl_se->deadline, rq_clock(rq)) || if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
...@@ -604,16 +605,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) ...@@ -604,16 +605,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
goto unlock; goto unlock;
} }
/*
* This is possible if switched_from_dl() raced against a running
* callback that took the above !dl_task() path and we've since then
* switched back into SCHED_DEADLINE.
*
* There's nothing to do except drop our task reference.
*/
if (dl_se->dl_new)
goto unlock;
/* /*
* The task might have been boosted by someone else and might be in the * The task might have been boosted by someone else and might be in the
* boosting/deboosting path, its not throttled. * boosting/deboosting path, its not throttled.
...@@ -735,8 +726,11 @@ static void update_curr_dl(struct rq *rq) ...@@ -735,8 +726,11 @@ static void update_curr_dl(struct rq *rq)
* approach need further study. * approach need further study.
*/ */
delta_exec = rq_clock_task(rq) - curr->se.exec_start; delta_exec = rq_clock_task(rq) - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0)) if (unlikely((s64)delta_exec <= 0)) {
if (unlikely(dl_se->dl_yielded))
goto throttle;
return; return;
}
schedstat_set(curr->se.statistics.exec_max, schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec)); max(curr->se.statistics.exec_max, delta_exec));
...@@ -749,8 +743,10 @@ static void update_curr_dl(struct rq *rq) ...@@ -749,8 +743,10 @@ static void update_curr_dl(struct rq *rq)
sched_rt_avg_update(rq, delta_exec); sched_rt_avg_update(rq, delta_exec);
dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec; dl_se->runtime -= delta_exec;
if (dl_runtime_exceeded(dl_se)) {
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1; dl_se->dl_throttled = 1;
__dequeue_task_dl(rq, curr, 0); __dequeue_task_dl(rq, curr, 0);
if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr))) if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
...@@ -917,7 +913,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, ...@@ -917,7 +913,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
* parameters of the task might need updating. Otherwise, * parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime. * we want a replenishment of its runtime.
*/ */
if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) if (flags & ENQUEUE_WAKEUP)
update_dl_entity(dl_se, pi_se); update_dl_entity(dl_se, pi_se);
else if (flags & ENQUEUE_REPLENISH) else if (flags & ENQUEUE_REPLENISH)
replenish_dl_entity(dl_se, pi_se); replenish_dl_entity(dl_se, pi_se);
...@@ -994,18 +990,14 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) ...@@ -994,18 +990,14 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
*/ */
static void yield_task_dl(struct rq *rq) static void yield_task_dl(struct rq *rq)
{ {
struct task_struct *p = rq->curr;
/* /*
* We make the task go to sleep until its current deadline by * We make the task go to sleep until its current deadline by
* forcing its runtime to zero. This way, update_curr_dl() stops * forcing its runtime to zero. This way, update_curr_dl() stops
* it and the bandwidth timer will wake it up and will give it * it and the bandwidth timer will wake it up and will give it
* new scheduling parameters (thanks to dl_yielded=1). * new scheduling parameters (thanks to dl_yielded=1).
*/ */
if (p->dl.runtime > 0) { rq->curr->dl.dl_yielded = 1;
rq->curr->dl.dl_yielded = 1;
p->dl.runtime = 0;
}
update_rq_clock(rq); update_rq_clock(rq);
update_curr_dl(rq); update_curr_dl(rq);
/* /*
...@@ -1722,6 +1714,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) ...@@ -1722,6 +1714,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
*/ */
static void switched_to_dl(struct rq *rq, struct task_struct *p) static void switched_to_dl(struct rq *rq, struct task_struct *p)
{ {
if (dl_time_before(p->dl.deadline, rq_clock(rq)))
setup_new_dl_entity(&p->dl, &p->dl);
if (task_on_rq_queued(p) && rq->curr != p) { if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
...@@ -1768,8 +1763,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p, ...@@ -1768,8 +1763,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
*/ */
resched_curr(rq); resched_curr(rq);
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
} else }
switched_to_dl(rq, p);
} }
const struct sched_class dl_sched_class = { const struct sched_class dl_sched_class = {
...
@@ -58,7 +58,15 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	raw_spin_lock(&rt_b->rt_runtime_lock);
 	if (!rt_b->rt_period_active) {
 		rt_b->rt_period_active = 1;
-		hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
+		/*
+		 * SCHED_DEADLINE updates the bandwidth, as a run away
+		 * RT task with a DL task could hog a CPU. But DL does
+		 * not reset the period. If a deadline task was running
+		 * without an RT task running, it can cause RT tasks to
+		 * throttle when they start up. Kick the timer right away
+		 * to update the period.
+		 */
+		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
 		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
 	}
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
@@ -436,7 +444,7 @@ static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
-	return !list_empty(&rt_se->run_list);
+	return rt_se->on_rq;
 }

 #ifdef CONFIG_RT_GROUP_SCHED
@@ -482,8 +490,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }

-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
@@ -499,7 +507,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 		if (!rt_se)
 			enqueue_top_rt_rq(rt_rq);
 		else if (!on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se, false);
+			enqueue_rt_entity(rt_se, 0);

 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_curr(rq);
@@ -516,7 +524,7 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	if (!rt_se)
 		dequeue_top_rt_rq(rt_rq);
 	else if (on_rt_rq(rt_se))
-		dequeue_rt_entity(rt_se);
+		dequeue_rt_entity(rt_se, 0);
 }

 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
@@ -1166,7 +1174,30 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }

-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+/*
+ * Change rt_se->run_list location unless SAVE && !MOVE
+ *
+ * assumes ENQUEUE/DEQUEUE flags match
+ */
+static inline bool move_entity(unsigned int flags)
+{
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+		return false;
+
+	return true;
+}
+
+static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
+{
+	list_del_init(&rt_se->run_list);
+
+	if (list_empty(array->queue + rt_se_prio(rt_se)))
+		__clear_bit(rt_se_prio(rt_se), array->bitmap);
+
+	rt_se->on_list = 0;
+}
+
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -1179,26 +1210,37 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 	 * get throttled and the current group doesn't have any other
 	 * active members.
 	 */
-	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
+		if (rt_se->on_list)
+			__delist_rt_entity(rt_se, array);
 		return;
+	}

-	if (head)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-	__set_bit(rt_se_prio(rt_se), array->bitmap);
+	if (move_entity(flags)) {
+		WARN_ON_ONCE(rt_se->on_list);
+		if (flags & ENQUEUE_HEAD)
+			list_add(&rt_se->run_list, queue);
+		else
+			list_add_tail(&rt_se->run_list, queue);
+
+		__set_bit(rt_se_prio(rt_se), array->bitmap);
+		rt_se->on_list = 1;
+	}
+	rt_se->on_rq = 1;

 	inc_rt_tasks(rt_se, rt_rq);
 }

-static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;

-	list_del_init(&rt_se->run_list);
-	if (list_empty(array->queue + rt_se_prio(rt_se)))
-		__clear_bit(rt_se_prio(rt_se), array->bitmap);
+	if (move_entity(flags)) {
+		WARN_ON_ONCE(!rt_se->on_list);
+		__delist_rt_entity(rt_se, array);
+	}
+	rt_se->on_rq = 0;

 	dec_rt_tasks(rt_se, rt_rq);
 }
@@ -1207,7 +1249,7 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct sched_rt_entity *back = NULL;
@@ -1220,31 +1262,31 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			__dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se, flags);
 	}
 }

-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);

-	dequeue_rt_stack(rt_se);
+	dequeue_rt_stack(rt_se, flags);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se, head);
+		__enqueue_rt_entity(rt_se, flags);
 	enqueue_top_rt_rq(&rq->rt);
 }

-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct rq *rq = rq_of_rt_se(rt_se);

-	dequeue_rt_stack(rt_se);
+	dequeue_rt_stack(rt_se, flags);

 	for_each_sched_rt_entity(rt_se) {
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);

 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se, false);
+			__enqueue_rt_entity(rt_se, flags);
 	}
 	enqueue_top_rt_rq(&rq->rt);
 }
@@ -1260,7 +1302,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;

-	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+	enqueue_rt_entity(rt_se, flags);

 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1271,7 +1313,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	struct sched_rt_entity *rt_se = &p->rt;

 	update_curr_rt(rq);
-	dequeue_rt_entity(rt_se);
+	dequeue_rt_entity(rt_se, flags);

 	dequeue_pushable_task(rq, p);
 }
......
@@ -3,6 +3,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/deadline.h>
+#include <linux/binfmts.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
@@ -313,12 +314,11 @@ extern int tg_nop(struct task_group *tg, void *data);
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			struct sched_entity *se, int cpu,
 			struct sched_entity *parent);
 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
@@ -909,6 +909,18 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 extern int group_balance_cpu(struct sched_group *sg);

+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+void register_sched_domain_sysctl(void);
+void unregister_sched_domain_sysctl(void);
+#else
+static inline void register_sched_domain_sysctl(void)
+{
+}
+static inline void unregister_sched_domain_sysctl(void)
+{
+}
+#endif
+
 #else

 static inline void sched_ttwu_pending(void) { }
@@ -1022,6 +1034,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

 extern struct static_key_false sched_numa_balancing;
+extern struct static_key_false sched_schedstats;

 static inline u64 global_rt_period(void)
 {
@@ -1130,18 +1143,40 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 extern const int sched_prio_to_weight[40];
 extern const u32 sched_prio_to_wmult[40];

+/*
+ * {de,en}queue flags:
+ *
+ * DEQUEUE_SLEEP  - task is no longer runnable
+ * ENQUEUE_WAKEUP - task just became runnable
+ *
+ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
+ *                are in a known state which allows modification. Such pairs
+ *                should preserve as much state as possible.
+ *
+ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
+ *        in the runqueue.
+ *
+ * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
+ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
+ * ENQUEUE_WAKING    - sched_class::task_waking was called
+ *
+ */
+
+#define DEQUEUE_SLEEP		0x01
+#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
+#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
+
 #define ENQUEUE_WAKEUP		0x01
-#define ENQUEUE_HEAD		0x02
+#define ENQUEUE_RESTORE		0x02
+#define ENQUEUE_MOVE		0x04
+#define ENQUEUE_HEAD		0x08
+#define ENQUEUE_REPLENISH	0x10
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		0x04 /* sched_class::task_waking was called */
+#define ENQUEUE_WAKING		0x20
 #else
 #define ENQUEUE_WAKING		0x00
 #endif
-#define ENQUEUE_REPLENISH	0x08
-#define ENQUEUE_RESTORE		0x10
-
-#define DEQUEUE_SLEEP		0x01
-#define DEQUEUE_SAVE		0x02

 #define RETRY_TASK		((void *)-1UL)
......
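To make the new flag pairing above concrete, here is a hedged sketch, not code from this series, of how a caller is expected to combine the flags when it briefly takes a task off the runqueue to modify it. change_prio_sketch() is a made-up name, and dequeue_task()/enqueue_task() stand in for the core helpers that consume these flags; the key point is that DEQUEUE_SAVE pairs with ENQUEUE_RESTORE and DEQUEUE_MOVE with ENQUEUE_MOVE, so a SAVE/RESTORE pair without MOVE keeps the task's spot in its queue.

static void change_prio_sketch(struct rq *rq, struct task_struct *p,
			       int prio, bool requeue)
{
	/* matching flag sets, per the comment block above */
	int dequeue_flags = DEQUEUE_SAVE | (requeue ? DEQUEUE_MOVE : 0);
	int enqueue_flags = ENQUEUE_RESTORE | (requeue ? ENQUEUE_MOVE : 0);
	bool queued = task_on_rq_queued(p);

	if (queued)
		dequeue_task(rq, p, dequeue_flags);	/* spurious dequeue */

	p->prio = prio;			/* modify while in a known state */

	if (queued)
		enqueue_task(rq, p, enqueue_flags);	/* spurious enqueue */
}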
@@ -29,9 +29,10 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 	if (rq)
 		rq->rq_sched_info.run_delay += delta;
 }
-# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
-# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
-# define schedstat_set(var, val)	do { var = (val); } while (0)
+# define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
+# define schedstat_inc(rq, field)	do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
+# define schedstat_add(rq, field, amt)	do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
+# define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -42,6 +43,7 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
+# define schedstat_enabled()		0
 # define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 # define schedstat_set(var, val)	do { } while (0)
......
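Since the schedstat_*() helpers above now expand to a static-branch test, callers with more expensive bookkeeping can also guard a whole block once, roughly as in the hedged sketch below; the function and the statistics fields are illustrative only, not taken from the patch itself.

static void note_wait_time(struct rq *rq, u64 delta)
{
	/* static branch: effectively free when schedstats is disabled */
	if (!schedstat_enabled())
		return;

	schedstat_inc(rq, rq_sched_info.pcount);	   /* illustrative field */
	schedstat_add(rq, rq_sched_info.run_delay, delta); /* illustrative field */
}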
#include <linux/sched.h>
#include <linux/swait.h>

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

void swake_up(struct swait_queue_head *q)
{
	unsigned long flags;

	if (!swait_active(q))
		return;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up);

/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	if (!swait_active(q))
		return;

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add(&wait->task_list, &q->task_list);
}

void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	prepare_to_swait(q, wait, state);

	return 0;
}
EXPORT_SYMBOL(prepare_to_swait_event);

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
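A hedged usage sketch of the simple waitqueue API defined above, mirroring the wait loop the KVM conversion below adopts. The demo_* names are made up; the condition check relies on prepare_to_swait() setting the task state under the queue lock, and a real caller may need additional ordering around its own condition.

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);	/* hypothetical queue */
static bool demo_event;				/* hypothetical condition */

static int demo_waiter(void *unused)
{
	DECLARE_SWAITQUEUE(wait);

	for (;;) {
		prepare_to_swait(&demo_wq, &wait, TASK_INTERRUPTIBLE);
		if (READ_ONCE(demo_event))
			break;
		schedule();
	}
	finish_swait(&demo_wq, &wait);
	return 0;
}

static void demo_wake(void)
{
	WRITE_ONCE(demo_event, true);
	/*
	 * swake_up() wakes a single waiter; swake_up_all() wakes them all
	 * but, per the comment above, must not run with IRQs disabled.
	 */
	if (swait_active(&demo_wq))
		swake_up(&demo_wq);
}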
@@ -116,9 +116,9 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 	if (preempt_count() == cnt) {
 #ifdef CONFIG_DEBUG_PREEMPT
-		current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
+		current->preempt_disable_ip = get_lock_parent_ip();
 #endif
-		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 	}
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
......
@@ -350,6 +350,17 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_SCHEDSTATS
+	{
+		.procname	= "sched_schedstats",
+		.data		= NULL,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_schedstats,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+#endif /* CONFIG_SCHEDSTATS */
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_NUMA_BALANCING
 	{
@@ -505,7 +516,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &latencytop_enabled,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= sysctl_latencytop,
 	},
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
......
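The kern_table entry above makes schedstats toggleable at runtime, presumably via /proc/sys/kernel/sched_schedstats (the path implied by its placement in the kernel table), in addition to the schedstats= boot parameter. The sysctl_schedstats handler itself is defined elsewhere in this series; as a hedged sketch only, such a handler typically flips the static key declared in sched.h roughly like this:

/* sketch, not the actual handler from this series */
static int sysctl_schedstats_sketch(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_unlikely(&sched_schedstats);

	t = *table;
	t.data = &state;		/* read/write a local copy */
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	if (write) {			/* propagate to the static key */
		if (state)
			static_branch_enable(&sched_schedstats);
		else
			static_branch_disable(&sched_schedstats);
	}
	return err;
}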
@@ -93,9 +93,11 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 {
 	struct mm_struct *mm;

-	/* convert pages-usec to Mbyte-usec */
-	stats->coremem = p->acct_rss_mem1 * PAGE_SIZE / MB;
-	stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE / MB;
+	/* convert pages-nsec/1024 to Mbyte-usec, see __acct_update_integrals */
+	stats->coremem = p->acct_rss_mem1 * PAGE_SIZE;
+	do_div(stats->coremem, 1000 * KB);
+	stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE;
+	do_div(stats->virtmem, 1000 * KB);
 	mm = get_task_mm(p);
 	if (mm) {
 		/* adjust to KB unit */
@@ -123,27 +125,28 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 static void __acct_update_integrals(struct task_struct *tsk,
 				    cputime_t utime, cputime_t stime)
 {
-	if (likely(tsk->mm)) {
-		cputime_t time, dtime;
-		struct timeval value;
-		unsigned long flags;
-		u64 delta;
-
-		local_irq_save(flags);
-		time = stime + utime;
-		dtime = time - tsk->acct_timexpd;
-		jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
-		delta = value.tv_sec;
-		delta = delta * USEC_PER_SEC + value.tv_usec;
-
-		if (delta == 0)
-			goto out;
-		tsk->acct_timexpd = time;
-		tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
-		tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
-	out:
-		local_irq_restore(flags);
-	}
+	cputime_t time, dtime;
+	u64 delta;
+
+	if (!likely(tsk->mm))
+		return;
+
+	time = stime + utime;
+	dtime = time - tsk->acct_timexpd;
+	/* Avoid division: cputime_t is often in nanoseconds already. */
+	delta = cputime_to_nsecs(dtime);
+
+	if (delta < TICK_NSEC)
+		return;
+
+	tsk->acct_timexpd = time;
+	/*
+	 * Divide by 1024 to avoid overflow, and to avoid division.
+	 * The final unit reported to userspace is Mbyte-usecs,
+	 * the rest of the math is done in xacct_add_tsk.
+	 */
+	tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;
+	tsk->acct_vm_mem1 += delta * tsk->mm->total_vm >> 10;
 }

 /**
@@ -153,9 +156,12 @@ static void __acct_update_integrals(struct task_struct *tsk,
 void acct_update_integrals(struct task_struct *tsk)
 {
 	cputime_t utime, stime;
+	unsigned long flags;

+	local_irq_save(flags);
 	task_cputime(tsk, &utime, &stime);
 	__acct_update_integrals(tsk, utime, stime);
+	local_irq_restore(flags);
 }

 /**
......
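A rough units check of the accounting math above, assuming KB is 1024 in this file: __acct_update_integrals() now accumulates acct_rss_mem1 += delta_ns * rss_pages >> 10, and xacct_add_tsk() later multiplies by PAGE_SIZE and divides by 1000 * KB, so the value handed to userspace is approximately

    (delta_ns * rss_pages / 1024) * PAGE_SIZE / (1000 * 1024)
  = (rss_pages * PAGE_SIZE / 2^20) * (delta_ns / 1000)
  = RSS in Mbytes * delta in usecs

That is, the reported unit stays Mbyte-usecs, but the per-tick path now uses only shifts and the single do_div() is deferred to the comparatively rare taskstats read.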
@@ -97,8 +97,8 @@ static void async_pf_execute(struct work_struct *work)
 	 * This memory barrier pairs with prepare_to_wait's set_current_state()
 	 */
 	smp_mb();
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
+	if (swait_active(&vcpu->wq))
+		swake_up(&vcpu->wq);

 	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
......
@@ -216,8 +216,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 	vcpu->kvm = kvm;
 	vcpu->vcpu_id = id;
 	vcpu->pid = NULL;
-	vcpu->halt_poll_ns = 0;
-	init_waitqueue_head(&vcpu->wq);
+	init_swait_queue_head(&vcpu->wq);
 	kvm_async_pf_vcpu_init(vcpu);

 	vcpu->pre_pcpu = -1;
@@ -1993,7 +1992,7 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	ktime_t start, cur;
-	DEFINE_WAIT(wait);
+	DECLARE_SWAITQUEUE(wait);
 	bool waited = false;
 	u64 block_ns;
@@ -2018,7 +2017,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_blocking(vcpu);

 	for (;;) {
-		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

 		if (kvm_vcpu_check_block(vcpu) < 0)
 			break;
@@ -2027,7 +2026,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		schedule();
 	}

-	finish_wait(&vcpu->wq, &wait);
+	finish_swait(&vcpu->wq, &wait);
 	cur = ktime_get();

 	kvm_arch_vcpu_unblocking(vcpu);
@@ -2059,11 +2058,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
 	int me;
 	int cpu = vcpu->cpu;
-	wait_queue_head_t *wqp;
+	struct swait_queue_head *wqp;

 	wqp = kvm_arch_vcpu_wq(vcpu);
-	if (waitqueue_active(wqp)) {
-		wake_up_interruptible(wqp);
+	if (swait_active(wqp)) {
+		swake_up(wqp);
 		++vcpu->stat.halt_wakeup;
 	}

@@ -2164,7 +2163,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 			continue;
 		if (vcpu == me)
 			continue;
-		if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+		if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
 			continue;
 		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 			continue;
......