Commit 3dd63b7d authored by Andrew Morton, committed by David S. Miller

[PATCH] SMP iowait stats

Patch from William Lee Irwin III <wli@holomorphy.com>

Idle time accounting is disturbed by the iowait statistics, for several
reasons:

(1) iowait time is not subdivided among cpus.
        The distinction between true idle time and idle time subtracted
        from a cpu in order to be accounted as iowait can only be made
        if the tick counters are kept properly per-cpu; a system-wide
        figure is then obtained by summing them. Any tick-type
        accounting which is not per-cpu breaks this: whenever any iowait
        is present on the system, cpus which are entirely idle have all
        of their idle ticks accounted to iowait instead of true idle
        time. (A user-space sketch of reading the resulting per-cpu
        iowait column follows this list.)

(2) kstat_read_proc() misreports iowait time
        The idle tick counter is passed to sprintf() twice: once in the
        idle tick position and once in the iowait tick position.

(3) performance enhancement
        The O(1) scheduler was very carefully constructed to access only
        localized cachelines whenever possible. A global counter
        violates one of its core design principles; keeping "most"
        accesses local is in greater harmony with its overall design and
        provides (at the very least) a qualitative performance
        improvement with respect to the cache.
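As a concrete illustration of (1) and (2), here is a minimal user-space
sketch. It is not part of this patch, and it assumes the /proc/stat
layout produced by kstat_read_proc() at this point, i.e. per-cpu lines
of the form "cpuN user nice system idle iowait"; later kernels append
further fields, which the sscanf() below simply does not read.

/* Hedged sketch: print per-cpu idle and iowait ticks from /proc/stat. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/stat", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                unsigned int cpu;
                unsigned long user, nice, sys, idle, iowait;

                /* only per-cpu "cpuN ..." lines, not the aggregate "cpu " line */
                if (strncmp(line, "cpu", 3) != 0 || !isdigit((unsigned char)line[3]))
                        continue;
                if (sscanf(line, "cpu%u %lu %lu %lu %lu %lu",
                           &cpu, &user, &nice, &sys, &idle, &iowait) == 6)
                        printf("cpu%u idle=%lu iowait=%lu\n", cpu, idle, iowait);
        }
        fclose(f);
        return 0;
}

Sampling these counters twice and differencing gives the usual per-cpu
idle and iowait percentages; with the old global counter, the iowait
column of an entirely idle cpu would absorb that cpu's idle ticks
whenever any task anywhere on the system was in iowait.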

The method of correcting this is simple: embed an atomic iowait counter
in each runqueue, find the runqueue being manipulated in io_schedule(),
increment its counter prior to schedule(), and decrement it after
returning from schedule(). The decrement is guaranteed to hit the same
counter that was incremented, because the runqueue pointer is held in a
variable local to the function. The per-runqueue counters are then
summed to obtain the global iowait statistic.

(Atomicity is required as the post-wait decrement may occur on a different
cpu from the one owning the counter.)

io_schedule() and io_schedule_timeout() are moved to sched.c, as they
must access the runqueues, which are private to sched.c; nr_iowait() is
added to export the sum of all runqueues' nr_iowait counters.
parent 33f9ef1c
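Before the diff itself, a minimal kernel-side sketch of how a wait loop
would use io_schedule() so that its sleep is charged to iowait rather
than plain idle. This is not part of the patch: my_device, my_io_done()
and the waitq field are hypothetical; only the waitqueue helpers and
io_schedule() are real interfaces.

/* Illustrative only: my_device, my_io_done() and ->waitq are made up. */
static void wait_for_my_io(struct my_device *dev)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&dev->waitq, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (my_io_done(dev))
                        break;
                io_schedule();  /* bumps this cpu's rq->nr_iowait around the sleep */
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->waitq, &wait);
}

Waits with a deadline would use io_schedule_timeout() the same way;
callers performing a deliberate, throttling wait against a
backing_dev_info keep using plain schedule(), per the comment above
io_schedule() in the diff below.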
@@ -56,7 +56,6 @@ static int queue_nr_requests;
 static int batch_requests;
 unsigned long blk_max_low_pfn, blk_max_pfn;
-atomic_t nr_iowait_tasks = ATOMIC_INIT(0);
 int blk_nohighio = 0;
 static struct congestion_state {
@@ -115,27 +114,6 @@ static void set_queue_congested(request_queue_t *q, int rw)
         atomic_inc(&congestion_states[rw].nr_congested_queues);
 }
-/*
- * This task is about to go to sleep on IO. Increment nr_iowait_tasks so
- * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
- */
-void io_schedule(void)
-{
-        atomic_inc(&nr_iowait_tasks);
-        schedule();
-        atomic_dec(&nr_iowait_tasks);
-}
-void io_schedule_timeout(long timeout)
-{
-        atomic_inc(&nr_iowait_tasks);
-        schedule_timeout(timeout);
-        atomic_dec(&nr_iowait_tasks);
-}
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @dev: device
...
@@ -372,7 +372,7 @@ static int kstat_read_proc(char *page, char **start, off_t off,
         jiffies_to_clock_t(kstat_cpu(i).cpustat.nice),
         jiffies_to_clock_t(kstat_cpu(i).cpustat.system),
         jiffies_to_clock_t(kstat_cpu(i).cpustat.idle),
-        jiffies_to_clock_t(kstat_cpu(i).cpustat.idle));
+        jiffies_to_clock_t(kstat_cpu(i).cpustat.iowait));
 }
 len += sprintf(page + len, "intr %u", sum);
@@ -406,12 +406,12 @@ static int kstat_read_proc(char *page, char **start, off_t off,
         "btime %lu\n"
         "processes %lu\n"
         "procs_running %lu\n"
-        "procs_blocked %u\n",
+        "procs_blocked %lu\n",
         nr_context_switches(),
         xtime.tv_sec - jif / HZ,
         total_forks,
         nr_running(),
-        atomic_read(&nr_iowait_tasks));
+        nr_iowait());
 return proc_calc_metrics(page, start, off, count, eof, len);
 }
...
@@ -470,9 +470,4 @@ static inline void put_dev_sector(Sector p)
 #endif
-extern atomic_t nr_iowait_tasks;
-void io_schedule(void);
-void io_schedule_timeout(long timeout);
 #endif
@@ -88,6 +88,7 @@ extern int nr_threads;
 extern int last_pid;
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
+extern unsigned long nr_iowait(void);
 #include <linux/time.h>
 #include <linux/param.h>
@@ -147,6 +148,8 @@ extern void show_trace(unsigned long *stack);
 extern void show_stack(unsigned long *stack);
 extern void show_regs(struct pt_regs *);
+void io_schedule(void);
+void io_schedule_timeout(long timeout);
 extern void cpu_init (void);
 extern void trap_init(void);
...
@@ -157,6 +157,7 @@ struct runqueue {
         task_t *migration_thread;
         struct list_head migration_queue;
+        atomic_t nr_iowait;
 } ____cacheline_aligned;
 static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
@@ -557,9 +558,11 @@ unsigned long nr_uninterruptible(void)
 {
         unsigned long i, sum = 0;
-        for (i = 0; i < NR_CPUS; i++)
+        for (i = 0; i < NR_CPUS; i++) {
+                if (!cpu_online(i))
+                        continue;
                 sum += cpu_rq(i)->nr_uninterruptible;
+        }
         return sum;
 }
@@ -567,9 +570,23 @@ unsigned long nr_context_switches(void)
 {
         unsigned long i, sum = 0;
-        for (i = 0; i < NR_CPUS; i++)
+        for (i = 0; i < NR_CPUS; i++) {
+                if (!cpu_online(i))
+                        continue;
                 sum += cpu_rq(i)->nr_switches;
+        }
+        return sum;
+}
+unsigned long nr_iowait(void)
+{
+        unsigned long i, sum = 0;
+        for (i = 0; i < NR_CPUS; ++i) {
+                if (!cpu_online(i))
+                        continue;
+                sum += atomic_read(&cpu_rq(i)->nr_iowait);
+        }
         return sum;
 }
@@ -875,7 +892,7 @@ void scheduler_tick(int user_ticks, int sys_ticks)
         /* note: this timer irq context must be accounted for as well */
         if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
                 kstat_cpu(cpu).cpustat.system += sys_ticks;
-        else if (atomic_read(&nr_iowait_tasks) > 0)
+        else if (atomic_read(&rq->nr_iowait) > 0)
                 kstat_cpu(cpu).cpustat.iowait += sys_ticks;
         else
                 kstat_cpu(cpu).cpustat.idle += sys_ticks;
@@ -1712,6 +1729,31 @@ void yield(void)
         sys_sched_yield();
 }
+/*
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+void io_schedule(void)
+{
+        struct runqueue *rq = this_rq();
+        atomic_inc(&rq->nr_iowait);
+        schedule();
+        atomic_dec(&rq->nr_iowait);
+}
+void io_schedule_timeout(long timeout)
+{
+        struct runqueue *rq = this_rq();
+        atomic_inc(&rq->nr_iowait);
+        schedule_timeout(timeout);
+        atomic_dec(&rq->nr_iowait);
+}
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
@@ -2160,6 +2202,7 @@ void __init sched_init(void)
         rq->expired = rq->arrays + 1;
         spin_lock_init(&rq->lock);
         INIT_LIST_HEAD(&rq->migration_queue);
+        atomic_set(&rq->nr_iowait, 0);
         for (j = 0; j < 2; j++) {
                 array = rq->arrays + j;
...