Commit 6ac0a8d7 authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] O(1) proc_pid_statm()

Merely removing down_read(&mm->mmap_sem) from task_vsize() is too
half-assed to let stand. The following patch removes the vma iteration
as well as the down_read(&mm->mmap_sem) from both task_mem() and
task_statm() and callers for the CONFIG_MMU=y case in favor of
accounting the various stats reported at the times of vma creation,
destruction, and modification. Unlike the 2.4.x patches of the same
name, this has no per-pte-modification overhead whatsoever.

This patch quashes end user complaints of top(1) being slow as well as
kernel hacker complaints of per-pte accounting overhead simultaneously.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 384aabda
......@@ -187,7 +187,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
PAGE_COPY_EXEC: PAGE_COPY;
insert_vm_struct(current->mm, mpnt);
current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
}
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
......
......@@ -2352,7 +2352,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
insert_vm_struct(mm, vma);
mm->total_vm += size >> PAGE_SHIFT;
vm_stat_account(vma);
up_write(&task->mm->mmap_sem);
/*
......
......@@ -40,6 +40,7 @@ expand_backing_store (struct vm_area_struct *vma, unsigned long address)
vma->vm_mm->total_vm += grow;
if (vma->vm_flags & VM_LOCKED)
vma->vm_mm->locked_vm += grow;
__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
return 0;
}
......
......@@ -69,7 +69,7 @@ int setup_arg_pages32(struct linux_binprm *bprm, int executable_stack)
mpnt->vm_page_prot = PAGE_COPY;
mpnt->vm_flags = VM_STACK_FLAGS;
insert_vm_struct(mm, mpnt);
mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
mm->stack_vm = mm->total_vm = vma_pages(mpnt);
}
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
......
......@@ -368,7 +368,7 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
PAGE_COPY_EXEC : PAGE_COPY;
insert_vm_struct(mm, mpnt);
mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
mm->stack_vm = mm->total_vm = vma_pages(mpnt);
}
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
......
......@@ -433,7 +433,7 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
mpnt->vm_flags |= mm->def_flags;
mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
insert_vm_struct(mm, mpnt);
mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
mm->stack_vm = mm->total_vm = vma_pages(mpnt);
}
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
......
......@@ -278,7 +278,6 @@ static inline char *task_cap(struct task_struct *p, char *buffer)
cap_t(p->cap_effective));
}
extern char *task_mem(struct mm_struct *, char *);
int proc_pid_status(struct task_struct *task, char * buffer)
{
char * orig = buffer;
......@@ -409,17 +408,13 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
return res;
}
extern int task_statm(struct mm_struct *, int *, int *, int *, int *);
int proc_pid_statm(struct task_struct *task, char *buffer)
{
int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
struct mm_struct *mm = get_task_mm(task);
if (mm) {
down_read(&mm->mmap_sem);
size = task_statm(mm, &shared, &text, &data, &resident);
up_read(&mm->mmap_sem);
mmput(mm);
}
......
......@@ -6,27 +6,11 @@
char *task_mem(struct mm_struct *mm, char *buffer)
{
unsigned long data = 0, stack = 0, exec = 0, lib = 0;
struct vm_area_struct *vma;
unsigned long data, text, lib;
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long len = (vma->vm_end - vma->vm_start) >> 10;
if (!vma->vm_file) {
data += len;
if (vma->vm_flags & VM_GROWSDOWN)
stack += len;
continue;
}
if (vma->vm_flags & VM_WRITE)
continue;
if (vma->vm_flags & VM_EXEC) {
exec += len;
if (vma->vm_flags & VM_EXECUTABLE)
continue;
lib += len;
}
}
data = mm->total_vm - mm->shared_vm - mm->stack_vm;
text = (mm->end_code - mm->start_code) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
buffer += sprintf(buffer,
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
......@@ -38,9 +22,8 @@ char *task_mem(struct mm_struct *mm, char *buffer)
mm->total_vm << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->rss << (PAGE_SHIFT-10),
data - stack, stack,
exec - lib, lib);
up_read(&mm->mmap_sem);
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib);
return buffer;
}
......@@ -52,28 +35,11 @@ unsigned long task_vsize(struct mm_struct *mm)
int task_statm(struct mm_struct *mm, int *shared, int *text,
int *data, int *resident)
{
struct vm_area_struct *vma;
int size = 0;
*shared = mm->shared_vm;
*text = mm->exec_vm - ((mm->end_code - mm->start_code) >> PAGE_SHIFT);
*data = mm->total_vm - mm->shared_vm;
*resident = mm->rss;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
int pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
size += pages;
if (is_vm_hugetlb_page(vma)) {
if (!(vma->vm_flags & VM_DONTCOPY))
*shared += pages;
continue;
}
if (vma->vm_file)
*shared += pages;
if (vma->vm_flags & VM_EXECUTABLE)
*text += pages;
else
*data += pages;
}
return size;
return mm->total_vm;
}
static int show_map(struct seq_file *m, void *v)
......
......@@ -83,6 +83,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
struct mm_tblock_struct *tbp;
int size = kobjsize(mm);
down_read(&mm->mmap_sem);
for (tbp = &mm->context.tblock; tbp; tbp = tbp->next) {
if (tbp->next)
size += kobjsize(tbp->next);
......@@ -94,7 +95,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
size += (*text = mm->end_code - mm->start_code);
size += (*data = mm->start_stack - mm->start_data);
up_read(&mm->mmap_sem);
*resident = size;
return size;
}
......
......@@ -752,6 +752,19 @@ extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
int write);
extern int remap_page_range(struct vm_area_struct *vma, unsigned long from,
unsigned long to, unsigned long size, pgprot_t prot);
void __vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
/*
 * Credit all pages spanned by @vma to its mm's VM statistics
 * (shared_vm / stack_vm / exec_vm, as classified by __vm_stat_account).
 */
static inline void vm_stat_account(struct vm_area_struct *vma)
{
	long npages = vma_pages(vma);

	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, npages);
}
/*
 * Inverse of vm_stat_account(): subtract @vma's pages from its mm's
 * VM statistics (pages argument is negated, same classification).
 */
static inline void vm_stat_unaccount(struct vm_area_struct *vma)
{
	long npages = vma_pages(vma);

	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, -npages);
}
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
......
......@@ -93,6 +93,8 @@ struct dentry *proc_pid_unhash(struct task_struct *p);
void proc_pid_flush(struct dentry *proc_dentry);
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
unsigned long task_vsize(struct mm_struct *);
int task_statm(struct mm_struct *, int *, int *, int *, int *);
char *task_mem(struct mm_struct *, char *);
extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent);
......
......@@ -224,8 +224,8 @@ struct mm_struct {
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long rss, total_vm, locked_vm;
unsigned long def_flags;
unsigned long rss, total_vm, locked_vm, shared_vm;
unsigned long exec_vm, stack_vm, def_flags;
unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
......
......@@ -729,6 +729,28 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
return NULL;
}
/*
 * Account @pages against the per-mm VM statistics that back the O(1)
 * /proc/<pid>/statm and /proc/<pid>/status reporting.  @pages may be
 * negative (see vm_stat_unaccount) when a mapping is removed or shrunk.
 *
 * Classification, mirroring what the old per-vma iteration computed:
 *  - hugetlb mappings count as shared unless VM_DONTCOPY, and nothing else;
 *  - file-backed mappings count towards shared_vm;
 *  - anonymous mappings with the architecture's stack growth flag
 *    (VM_GROWSUP or VM_GROWSDOWN, whichever VM_STACK_FLAGS selects)
 *    count towards stack_vm;
 *  - executable mappings additionally count towards exec_vm.
 *
 * NOTE(review): callers in this patch invoke this under mmap_sem held
 * for write (see do_mmap_pgoff / pfm_smpl_buffer_alloc), which is what
 * serializes these unlocked counter updates — confirm for new call sites.
 */
void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
		       struct file *file, long pages)
{
	const unsigned long stack_flags
		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

	/*
	 * BUGFIX: the Kconfig symbol is HUGETLB_PAGE; CONFIG_HUGETLB does
	 * not exist, so the original #ifdef compiled the hugetlb branch
	 * out on every configuration and hugetlb mappings were never
	 * accounted as shared.
	 */
#ifdef CONFIG_HUGETLB_PAGE
	if (flags & VM_HUGETLB) {
		if (!(flags & VM_DONTCOPY))
			mm->shared_vm += pages;
		return;
	}
#endif /* CONFIG_HUGETLB_PAGE */

	if (file)
		mm->shared_vm += pages;
	else if (flags & stack_flags)
		mm->stack_vm += pages;

	if (flags & VM_EXEC)
		mm->exec_vm += pages;
}
/*
* The caller must hold down_write(current->mm->mmap_sem).
*/
......@@ -987,6 +1009,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
pgoff, flags & MAP_NONBLOCK);
down_write(&mm->mmap_sem);
}
__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
return addr;
unmap_and_free_vma:
......@@ -1330,6 +1353,7 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
vma->vm_mm->total_vm += grow;
if (vma->vm_flags & VM_LOCKED)
vma->vm_mm->locked_vm += grow;
__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
anon_vma_unlock(vma);
return 0;
}
......@@ -1392,6 +1416,7 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
vma->vm_mm->total_vm += grow;
if (vma->vm_flags & VM_LOCKED)
vma->vm_mm->locked_vm += grow;
__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
anon_vma_unlock(vma);
return 0;
}
......@@ -1497,6 +1522,7 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
area->vm_mm->total_vm -= len >> PAGE_SHIFT;
if (area->vm_flags & VM_LOCKED)
area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vm_stat_unaccount(area);
area->vm_mm->unmap_area(area);
remove_vm_struct(area);
}
......
......@@ -175,9 +175,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
* vm_flags and vm_page_prot are protected by the mmap_sem
* held in write mode.
*/
vm_stat_unaccount(vma);
vma->vm_flags = newflags;
vma->vm_page_prot = newprot;
change_protection(vma, start, end, newprot);
vm_stat_account(vma);
return 0;
fail:
......
......@@ -224,6 +224,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
}
mm->total_vm += new_len >> PAGE_SHIFT;
__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
mm->locked_vm += new_len >> PAGE_SHIFT;
if (new_len > old_len)
......@@ -360,6 +361,8 @@ unsigned long do_mremap(unsigned long addr,
addr + new_len, vma->vm_pgoff, NULL);
current->mm->total_vm += pages;
__vm_stat_account(vma->vm_mm, vma->vm_flags,
vma->vm_file, pages);
if (vma->vm_flags & VM_LOCKED) {
current->mm->locked_vm += pages;
make_pages_present(addr + old_len,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment