Commit 258f669e authored by Vlastimil Babka, committed by Linus Torvalds

mm: /proc/pid/smaps_rollup: convert to single value seq_file

The /proc/pid/smaps_rollup file is currently implemented via the
m_start/m_next/m_stop seq_file iterators shared with the other maps files,
that iterate over vma's.  However, the rollup file doesn't print anything
for each vma, only accumulate the stats.

There are some issues with the current code as reported in [1] - the
accumulated stats can get skewed if seq_file start()/stop() op is called
multiple times, if show() is called multiple times, and after seeks to
non-zero position.

Patch [1] fixed those within existing design, but I believe it is
fundamentally wrong to expose the vma iterators to the seq_file mechanism
when smaps_rollup shows logically a single set of values for the whole
address space.

This patch thus refactors the code to provide a single "value" at offset
0, with vma iteration to gather the stats done internally.  This fixes the
situations where results are skewed, and simplifies the code, especially
in show_smap(), at the expense of somewhat less code reuse.

[1] https://marc.info/?l=linux-mm&m=151927723128134&w=2

[vbabka@suse.cz: use seq_file infrastructure]
  Link: http://lkml.kernel.org/r/bf4525b0-fd5b-4c4c-2cb3-adee3dd95a48@suse.cz
Link: http://lkml.kernel.org/r/20180723111933.15443-5-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Daniel Colascione <dancol@google.com>
Reviewed-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f1547959
...@@ -285,7 +285,6 @@ struct proc_maps_private { ...@@ -285,7 +285,6 @@ struct proc_maps_private {
struct inode *inode; struct inode *inode;
struct task_struct *task; struct task_struct *task;
struct mm_struct *mm; struct mm_struct *mm;
struct mem_size_stats *rollup;
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
struct vm_area_struct *tail_vma; struct vm_area_struct *tail_vma;
#endif #endif
......
...@@ -247,7 +247,6 @@ static int proc_map_release(struct inode *inode, struct file *file) ...@@ -247,7 +247,6 @@ static int proc_map_release(struct inode *inode, struct file *file)
if (priv->mm) if (priv->mm)
mmdrop(priv->mm); mmdrop(priv->mm);
kfree(priv->rollup);
return seq_release_private(inode, file); return seq_release_private(inode, file);
} }
...@@ -404,7 +403,6 @@ const struct file_operations proc_pid_maps_operations = { ...@@ -404,7 +403,6 @@ const struct file_operations proc_pid_maps_operations = {
#ifdef CONFIG_PROC_PAGE_MONITOR #ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats { struct mem_size_stats {
bool first;
unsigned long resident; unsigned long resident;
unsigned long shared_clean; unsigned long shared_clean;
unsigned long shared_dirty; unsigned long shared_dirty;
...@@ -418,7 +416,6 @@ struct mem_size_stats { ...@@ -418,7 +416,6 @@ struct mem_size_stats {
unsigned long swap; unsigned long swap;
unsigned long shared_hugetlb; unsigned long shared_hugetlb;
unsigned long private_hugetlb; unsigned long private_hugetlb;
unsigned long first_vma_start;
u64 pss; u64 pss;
u64 pss_locked; u64 pss_locked;
u64 swap_pss; u64 swap_pss;
...@@ -775,57 +772,75 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss) ...@@ -775,57 +772,75 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss)
static int show_smap(struct seq_file *m, void *v) static int show_smap(struct seq_file *m, void *v)
{ {
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v; struct vm_area_struct *vma = v;
struct mem_size_stats mss_stack; struct mem_size_stats mss;
struct mem_size_stats *mss;
memset(&mss, 0, sizeof(mss));
smap_gather_stats(vma, &mss);
show_map_vma(m, vma);
SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
seq_puts(m, " kB\n");
__show_smap(m, &mss);
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
show_smap_vma_flags(m, vma);
m_cache_vma(m, vma);
return 0;
}
static int show_smaps_rollup(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct mem_size_stats mss;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long last_vma_end = 0;
int ret = 0; int ret = 0;
bool rollup_mode;
bool last_vma;
if (priv->rollup) {
rollup_mode = true;
mss = priv->rollup;
if (mss->first) {
mss->first_vma_start = vma->vm_start;
mss->first = false;
}
last_vma = !m_next_vma(priv, vma);
} else {
rollup_mode = false;
memset(&mss_stack, 0, sizeof(mss_stack));
mss = &mss_stack;
}
smap_gather_stats(vma, mss); priv->task = get_proc_task(priv->inode);
if (!priv->task)
return -ESRCH;
if (!rollup_mode) { mm = priv->mm;
show_map_vma(m, vma); if (!mm || !mmget_not_zero(mm)) {
} else if (last_vma) { ret = -ESRCH;
show_vma_header_prefix( goto out_put_task;
m, mss->first_vma_start, vma->vm_end, 0, 0, 0, 0);
seq_pad(m, ' ');
seq_puts(m, "[rollup]\n");
} else {
ret = SEQ_SKIP;
} }
if (!rollup_mode) { memset(&mss, 0, sizeof(mss));
SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
seq_puts(m, " kB\n");
}
if (!rollup_mode || last_vma) down_read(&mm->mmap_sem);
__show_smap(m, mss); hold_task_mempolicy(priv);
if (!rollup_mode) { for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
if (arch_pkeys_enabled()) smap_gather_stats(vma, &mss);
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); last_vma_end = vma->vm_end;
show_smap_vma_flags(m, vma);
} }
m_cache_vma(m, vma);
show_vma_header_prefix(m, priv->mm->mmap->vm_start,
last_vma_end, 0, 0, 0, 0);
seq_pad(m, ' ');
seq_puts(m, "[rollup]\n");
__show_smap(m, &mss);
release_task_mempolicy(priv);
up_read(&mm->mmap_sem);
mmput(mm);
out_put_task:
put_task_struct(priv->task);
priv->task = NULL;
return ret; return ret;
} }
#undef SEQ_PUT_DEC #undef SEQ_PUT_DEC
...@@ -842,23 +857,45 @@ static int pid_smaps_open(struct inode *inode, struct file *file) ...@@ -842,23 +857,45 @@ static int pid_smaps_open(struct inode *inode, struct file *file)
return do_maps_open(inode, file, &proc_pid_smaps_op); return do_maps_open(inode, file, &proc_pid_smaps_op);
} }
static int pid_smaps_rollup_open(struct inode *inode, struct file *file) static int smaps_rollup_open(struct inode *inode, struct file *file)
{ {
struct seq_file *seq; int ret;
struct proc_maps_private *priv; struct proc_maps_private *priv;
int ret = do_maps_open(inode, file, &proc_pid_smaps_op);
priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
if (ret < 0) if (!priv)
return ret;
seq = file->private_data;
priv = seq->private;
priv->rollup = kzalloc(sizeof(*priv->rollup), GFP_KERNEL);
if (!priv->rollup) {
proc_map_release(inode, file);
return -ENOMEM; return -ENOMEM;
ret = single_open(file, show_smaps_rollup, priv);
if (ret)
goto out_free;
priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR(priv->mm)) {
ret = PTR_ERR(priv->mm);
single_release(inode, file);
goto out_free;
} }
priv->rollup->first = true;
return 0; return 0;
out_free:
kfree(priv);
return ret;
}
static int smaps_rollup_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct proc_maps_private *priv = seq->private;
if (priv->mm)
mmdrop(priv->mm);
kfree(priv);
return single_release(inode, file);
} }
const struct file_operations proc_pid_smaps_operations = { const struct file_operations proc_pid_smaps_operations = {
...@@ -869,10 +906,10 @@ const struct file_operations proc_pid_smaps_operations = { ...@@ -869,10 +906,10 @@ const struct file_operations proc_pid_smaps_operations = {
}; };
const struct file_operations proc_pid_smaps_rollup_operations = { const struct file_operations proc_pid_smaps_rollup_operations = {
.open = pid_smaps_rollup_open, .open = smaps_rollup_open,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = proc_map_release, .release = smaps_rollup_release,
}; };
enum clear_refs_types { enum clear_refs_types {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment