Commit 15e19695 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] add /proc/vmstat (start of /proc/stat cleanup)

Moves the VM accounting out of /proc/stat and into /proc/vmstat.

The VM accounting is now per-cpu.

It also moves kstat.pgpgin and kstat.pgpgout into /proc/vmstat.  This
is a bit of a duplication of /proc/diskstats (SARD), but it's easy,
super-cheap, and makes life a lot easier for all the system monitoring
applications which we just broke.

We now require procps 2.0.9.

Updated versions of top and vmstat are available at http://surriel.com,
and the Cygnus CVS is up to date for these changes (Rik has the CVS
info at the above site).

This tidies up kernel_stat quite a lot - it now only contains CPU
things (interrupts and CPU loads) and disk things.  So we now have:

/proc/stat:	CPU things and disk things
/proc/vmstat:	VM things	(plus pgpgin, pgpgout)

The SARD patch removes the disk things from /proc/stat as well.
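
A rough illustration (not part of this patch) of what the monitoring
side now looks like: /proc/vmstat is a flat list of "name value" lines,
so a tool that used to pick the page/swap fields out of /proc/stat can
simply scan the new file.  Counter names such as pgpgin and pswpout come
from the patch below; the program itself is only a sketch.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long value;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* each line is "<counter-name> <unsigned long>" */
	while (fscanf(f, "%63s %lu", name, &value) == 2)
		printf("%-20s %lu\n", name, value);
	fclose(f);
	return 0;
}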
parent 735a2573
......@@ -31,7 +31,7 @@ al español
Eine deutsche Version dieser Datei finden Sie unter
<http://www.stefan-winter.de/Changes-2.4.0.txt>.
Last updated: January 22, 2002
Last updated: October 1st, 2002
Chris Ricker (kaboom@gatech.edu or chris.ricker@genetics.utah.edu).
......@@ -60,7 +60,8 @@ o xfsprogs 2.1.0 # xfs_db -V
o pcmcia-cs 3.1.21 # cardmgr -V
o PPP 2.4.0 # pppd --version
o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version
o procps 2.0.9 # ps --version
Kernel compilation
==================
......@@ -80,9 +81,7 @@ almost certainly bugs (mainly, but not exclusively, in the kernel) that
will need to be fixed in order to use these compilers. In any case, using
pgcc instead of plain gcc is just asking for trouble.
Note that gcc 2.7.2.3 and gcc 2.91.66 (egcs-1.1.2) are no longer supported
kernel compilers. The kernel no longer works around bugs in these versions,
and, in fact, will refuse to be compiled with it.
gcc 2.91.66 (egcs-1.1.2) continues to be supported for SPARC64 requirements.
The Red Hat gcc 2.96 compiler subtree can also be used to build this tree.
You should ensure you use gcc-2.96-74 or later. gcc-2.96-54 will not build
......
......@@ -1856,21 +1856,14 @@ int submit_bio(int rw, struct bio *bio)
{
int count = bio_sectors(bio);
/*
* do some validity checks...
*/
BUG_ON(!bio->bi_end_io);
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw = rw;
if (rw & WRITE)
kstat.pgpgout += count;
mod_page_state(pgpgout, count);
else
kstat.pgpgin += count;
mod_page_state(pgpgin, count);
generic_make_request(bio);
return 1;
}
......
......@@ -252,6 +252,18 @@ static struct file_operations proc_cpuinfo_operations = {
.release = seq_release,
};
extern struct seq_operations vmstat_op;
static int vmstat_open(struct inode *inode, struct file *file)
{
return seq_open(file, &vmstat_op);
}
static struct file_operations proc_vmstat_file_operations = {
open: vmstat_open,
read: seq_read,
llseek: seq_lseek,
release: seq_release,
};
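
For comparison only: the new entry uses the older GNU labelled-initializer
syntax (open:, read:, ...), whereas proc_cpuinfo_operations just above uses
C99 designated initializers.  The equivalent designated-initializer form
would be (illustrative, not part of the patch):

static struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};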
#ifdef CONFIG_PROC_HARDWARE
static int hardware_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
......@@ -359,16 +371,8 @@ static int kstat_read_proc(char *page, char **start, off_t off,
+ kstat.per_cpu_nice[i] \
+ kstat.per_cpu_system[i])));
}
len += sprintf(page + len,
"page %u %u\n"
"swap %u %u\n"
"intr %u",
kstat.pgpgin >> 1,
kstat.pgpgout >> 1,
kstat.pswpin,
kstat.pswpout,
sum
);
len += sprintf(page + len, "intr %u", sum);
#if !defined(CONFIG_ARCH_S390)
for (i = 0 ; i < NR_IRQS ; i++)
len += sprintf(page + len, " %u", kstat_irqs(i));
......@@ -395,29 +399,9 @@ static int kstat_read_proc(char *page, char **start, off_t off,
}
len += sprintf(page + len,
"\npageallocs %u\n"
"pagefrees %u\n"
"pageactiv %u\n"
"pagedeact %u\n"
"pagefault %u\n"
"majorfault %u\n"
"pagescan %u\n"
"pagesteal %u\n"
"pageoutrun %u\n"
"allocstall %u\n"
"ctxt %lu\n"
"\nctxt %lu\n"
"btime %lu\n"
"processes %lu\n",
kstat.pgalloc,
kstat.pgfree,
kstat.pgactivate,
kstat.pgdeactivate,
kstat.pgfault,
kstat.pgmajfault,
kstat.pgscan,
kstat.pgsteal,
kstat.pageoutrun,
kstat.allocstall,
nr_context_switches(),
xtime.tv_sec - jif / HZ,
total_forks);
......@@ -646,6 +630,7 @@ void __init proc_misc_init(void)
create_seq_entry("interrupts", 0, &proc_interrupts_operations);
create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
#ifdef CONFIG_MODULES
create_seq_entry("modules", 0, &proc_modules_operations);
create_seq_entry("ksyms", 0, &proc_ksyms_operations);
......
......@@ -24,13 +24,6 @@ struct kernel_stat {
unsigned int dk_drive_wio[DK_MAX_MAJOR][DK_MAX_DISK];
unsigned int dk_drive_rblk[DK_MAX_MAJOR][DK_MAX_DISK];
unsigned int dk_drive_wblk[DK_MAX_MAJOR][DK_MAX_DISK];
unsigned int pgpgin, pgpgout;
unsigned int pswpin, pswpout;
unsigned int pgalloc, pgfree;
unsigned int pgactivate, pgdeactivate;
unsigned int pgfault, pgmajfault;
unsigned int pgscan, pgsteal;
unsigned int pageoutrun, allocstall;
#if !defined(CONFIG_ARCH_S390)
unsigned int irqs[NR_CPUS][NR_IRQS];
#endif
......
......@@ -70,7 +70,8 @@
#define PG_direct 16 /* ->pte_chain points directly at pte */
/*
* Global page accounting. One instance per CPU.
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
*/
extern struct page_state {
unsigned long nr_dirty;
......@@ -80,9 +81,30 @@ extern struct page_state {
unsigned long nr_reverse_maps;
unsigned long nr_mapped;
unsigned long nr_slab;
#define GET_PAGE_STATE_LAST nr_slab
/*
* The below are zeroed by get_page_state(). Use get_full_page_state()
* to add up all these.
*/
unsigned long pgpgin;
unsigned long pgpgout;
unsigned long pswpin;
unsigned long pswpout;
unsigned long pgalloc;
unsigned long pgfree;
unsigned long pgactivate;
unsigned long pgdeactivate;
unsigned long pgfault;
unsigned long pgmajfault;
unsigned long pgscan;
unsigned long pgsteal;
unsigned long pageoutrun;
unsigned long allocstall;
} ____cacheline_aligned_in_smp page_states[NR_CPUS];
extern void get_page_state(struct page_state *ret);
extern void get_full_page_state(struct page_state *ret);
#define mod_page_state(member, delta) \
do { \
......
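
The hunk above cuts off before the body of mod_page_state().  As a hedged
sketch only (the exact body is not shown in this diff), a per-CPU update of
this kind plausibly disables preemption so smp_processor_id() stays stable
while the current CPU's page_states[] slot is bumped; inc_page_state() would
then be the delta == 1 case used throughout the mm/ changes below.

/* Assumed form of the per-CPU accounting helpers; not taken verbatim
 * from the patch, since the macro body is truncated above. */
#define mod_page_state(member, delta)					\
	do {								\
		preempt_disable();					\
		page_states[smp_processor_id()].member += (delta);	\
		preempt_enable();					\
	} while (0)

#define inc_page_state(member)	mod_page_state(member, 1UL)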
......@@ -554,8 +554,6 @@ static int init(void * unused)
unlock_kernel();
system_running = 1;
kstat.pgfree = 0;
if (open("/dev/console", O_RDWR, 0) < 0)
printk("Warning: unable to open an initial console.\n");
......
......@@ -1098,7 +1098,7 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
return NULL;
page_not_uptodate:
KERNEL_STAT_INC(pgmajfault);
inc_page_state(pgmajfault);
lock_page(page);
/* Did it get unhashed while we waited for it? */
......
......@@ -1216,7 +1216,7 @@ static int do_swap_page(struct mm_struct * mm,
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
KERNEL_STAT_INC(pgmajfault);
inc_page_state(pgmajfault);
}
mark_page_accessed(page);
......@@ -1461,7 +1461,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
current->state = TASK_RUNNING;
pgd = pgd_offset(mm, address);
KERNEL_STAT_INC(pgfault);
inc_page_state(pgfault);
/*
* We need the page table lock to synchronize with kswapd
* and the SMP-safe atomic PTE updates.
......
......@@ -13,7 +13,7 @@
*/
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
......@@ -24,6 +24,7 @@
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
unsigned long totalram_pages;
unsigned long totalhigh_pages;
......@@ -86,7 +87,7 @@ void __free_pages_ok (struct page *page, unsigned int order)
struct page *base;
struct zone *zone;
KERNEL_STAT_ADD(pgfree, 1<<order);
mod_page_state(pgfree, 1<<order);
BUG_ON(PageLRU(page));
BUG_ON(PagePrivate(page));
......@@ -324,7 +325,7 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
if (gfp_mask & __GFP_WAIT)
might_sleep();
KERNEL_STAT_ADD(pgalloc, 1<<order);
mod_page_state(pgalloc, 1<<order);
zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
classzone = zones[0];
......@@ -397,7 +398,7 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
if (!(gfp_mask & __GFP_WAIT))
goto nopage;
KERNEL_STAT_INC(allocstall);
inc_page_state(allocstall);
page = balance_classzone(classzone, gfp_mask, order, &freed);
if (page)
return page;
......@@ -555,28 +556,39 @@ unsigned int nr_free_highpages (void)
struct page_state page_states[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(page_states);
void get_page_state(struct page_state *ret)
void __get_page_state(struct page_state *ret, int nr)
{
int pcpu;
int cpu;
memset(ret, 0, sizeof(*ret));
for (pcpu = 0; pcpu < NR_CPUS; pcpu++) {
struct page_state *ps;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
unsigned long *in, *out, off;
if (!cpu_online(pcpu))
if (!cpu_online(cpu))
continue;
ps = &page_states[pcpu];
ret->nr_dirty += ps->nr_dirty;
ret->nr_writeback += ps->nr_writeback;
ret->nr_pagecache += ps->nr_pagecache;
ret->nr_page_table_pages += ps->nr_page_table_pages;
ret->nr_reverse_maps += ps->nr_reverse_maps;
ret->nr_mapped += ps->nr_mapped;
ret->nr_slab += ps->nr_slab;
in = (unsigned long *)(page_states + cpu);
out = (unsigned long *)ret;
for (off = 0; off < nr; off++)
*out++ += *in++;
}
}
void get_page_state(struct page_state *ret)
{
int nr;
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
__get_page_state(ret, nr + 1);
}
void get_full_page_state(struct page_state *ret)
{
__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
}
void get_zone_counts(unsigned long *active, unsigned long *inactive)
{
struct zone *zone;
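
Because struct page_state contains only unsigned longs, __get_page_state()
above can walk each CPU's copy as a flat array of words and add the first
nr of them into ret.  get_page_state() converts the byte offset of
GET_PAGE_STATE_LAST into a word index and adds one so that the last field is
included; get_full_page_state() simply sums every word.  The standalone
program below (hypothetical names, user space only) demonstrates the same
arithmetic:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define NCPUS 4

struct counters {			/* only unsigned longs allowed */
	unsigned long nr_dirty;
	unsigned long nr_slab;
#define LAST_CHEAP_FIELD nr_slab
	unsigned long pgfault;		/* not summed by the cheap variant */
};

static struct counters percpu[NCPUS] = {
	{ 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 },
};

/* Mirror of __get_page_state(): zero the result, then accumulate the
 * first nr_words words of every per-CPU copy. */
static void sum_counters(struct counters *ret, int nr_words)
{
	unsigned long *out = (unsigned long *)ret;
	int cpu, off;

	memset(ret, 0, sizeof(*ret));
	for (cpu = 0; cpu < NCPUS; cpu++) {
		unsigned long *in = (unsigned long *)&percpu[cpu];

		for (off = 0; off < nr_words; off++)
			out[off] += in[off];
	}
}

int main(void)
{
	struct counters sum;
	/* byte offset of the last wanted field -> word index, plus one
	 * to include that field itself (this is the "nr + 1" above) */
	int nr = offsetof(struct counters, LAST_CHEAP_FIELD)
			/ sizeof(unsigned long) + 1;

	sum_counters(&sum, nr);
	printf("nr_dirty=%lu nr_slab=%lu pgfault=%lu\n",
	       sum.nr_dirty, sum.nr_slab, sum.pgfault);
	return 0;
}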
......@@ -1048,4 +1060,74 @@ struct seq_operations fragmentation_op = {
.show = frag_show,
};
static char *vmstat_text[] = {
"nr_dirty",
"nr_writeback",
"nr_pagecache",
"nr_page_table_pages",
"nr_reverse_maps",
"nr_mapped",
"nr_slab",
"pgpgin",
"pgpgout",
"pswpin",
"pswpout",
"pgalloc",
"pgfree",
"pgactivate",
"pgdeactivate",
"pgfault",
"pgmajfault",
"pgscan",
"pgsteal",
"pageoutrun",
"allocstall",
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
struct page_state *ps;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
ps = kmalloc(sizeof(*ps), GFP_KERNEL);
m->private = ps;
if (!ps)
return ERR_PTR(-ENOMEM);
get_full_page_state(ps);
return (unsigned long *)ps + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
(*pos)++;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
kfree(m->private);
m->private = NULL;
}
struct seq_operations vmstat_op = {
.start = vmstat_start,
.next = vmstat_next,
.stop = vmstat_stop,
.show = vmstat_show,
};
#endif /* CONFIG_PROC_FS */
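
The vmstat_op iterator above follows the usual seq_file protocol: ->start()
snapshots all counters into m->private once per read and returns a pointer
to the *pos'th word, ->next() advances the position, ->show() turns the
pointer back into an index into vmstat_text[], and ->stop() frees the
snapshot.  A user-space analogue of that read loop (hypothetical, greatly
simplified, no seq_file buffering or error pointers) looks like this:

#include <stdio.h>
#include <stdlib.h>

static const char *text[] = { "pgpgin", "pgpgout", "pswpin", "pswpout" };
#define NR	(sizeof(text) / sizeof(text[0]))

struct seq { void *private; };

static void *demo_start(struct seq *m, long *pos)
{
	unsigned long *snap;

	if ((size_t)*pos >= NR)
		return NULL;
	snap = calloc(NR, sizeof(*snap));	/* snapshot of the counters */
	m->private = snap;
	if (!snap)
		return NULL;
	/* a real ->start() would fill the snapshot, as vmstat_start()
	 * does with get_full_page_state() */
	return snap + *pos;
}

static void *demo_next(struct seq *m, void *v, long *pos)
{
	(*pos)++;
	if ((size_t)*pos >= NR)
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int demo_show(struct seq *m, void *v)
{
	unsigned long *l = v;
	long off = l - (unsigned long *)m->private;

	printf("%s %lu\n", text[off], *l);
	return 0;
}

static void demo_stop(struct seq *m, void *v)
{
	free(m->private);
	m->private = NULL;
}

int main(void)
{
	struct seq m = { NULL };
	long pos = 0;
	void *v;

	/* roughly the loop the seq_file core runs for one read */
	for (v = demo_start(&m, &pos); v; v = demo_next(&m, v, &pos))
		demo_show(&m, v);
	demo_stop(&m, v);
	return 0;
}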
......@@ -100,7 +100,7 @@ int swap_writepage(struct page *page)
ret = -ENOMEM;
goto out;
}
kstat.pswpout++;
inc_page_state(pswpout);
SetPageWriteback(page);
unlock_page(page);
submit_bio(WRITE, bio);
......@@ -119,7 +119,7 @@ int swap_readpage(struct file *file, struct page *page)
ret = -ENOMEM;
goto out;
}
kstat.pswpin++;
inc_page_state(pswpin);
submit_bio(READ, bio);
out:
return ret;
......
......@@ -38,7 +38,7 @@ void activate_page(struct page *page)
del_page_from_inactive_list(zone, page);
SetPageActive(page);
add_page_to_active_list(zone, page);
KERNEL_STAT_INC(pgactivate);
inc_page_state(pgactivate);
}
spin_unlock_irq(&zone->lru_lock);
}
......
......@@ -312,8 +312,8 @@ shrink_list(struct list_head *page_list, int nr_pages,
list_splice(&ret_pages, page_list);
if (pagevec_count(&freed_pvec))
__pagevec_release_nonlru(&freed_pvec);
KERNEL_STAT_ADD(pgsteal, nr_pages_in - nr_pages);
KERNEL_STAT_ADD(pgactivate, pgactivate);
mod_page_state(pgsteal, nr_pages_in - nr_pages);
mod_page_state(pgactivate, pgactivate);
return nr_pages;
}
......@@ -380,7 +380,7 @@ shrink_cache(int nr_pages, struct zone *zone,
goto done;
max_scan -= nr_scan;
KERNEL_STAT_ADD(pgscan, nr_scan);
mod_page_state(pgscan, nr_scan);
nr_pages = shrink_list(&page_list, nr_pages,
gfp_mask, &max_scan, nr_mapped);
......@@ -527,8 +527,8 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in)
spin_unlock_irq(&zone->lru_lock);
pagevec_release(&pvec);
KERNEL_STAT_ADD(pgscan, nr_pages_in - nr_pages);
KERNEL_STAT_ADD(pgdeactivate, pgdeactivate);
mod_page_state(pgscan, nr_pages_in - nr_pages);
mod_page_state(pgdeactivate, pgdeactivate);
}
static /* inline */ int
......@@ -641,7 +641,7 @@ try_to_free_pages(struct zone *classzone,
int priority = DEF_PRIORITY;
int nr_pages = SWAP_CLUSTER_MAX;
KERNEL_STAT_INC(pageoutrun);
inc_page_state(pageoutrun);
for (priority = DEF_PRIORITY; priority; priority--) {
int total_scanned = 0;
......