Commit bdfaa2ee authored by Oleg Nesterov, committed by Ingo Molnar

uprobes: Rename the "struct page *" args of __replace_page()

Purely cosmetic, no changes in the compiled code.

Perhaps it is just me, but I can hardly read __replace_page() because I can't
distinguish "page" from "kpage", and because I need to look at the caller to
ensure that, say, kpage really is the new page and the code is correct.
Rename them to old_page and new_page; this matches the caller.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Brenden Blanco <bblanco@plumgrid.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Link: http://lkml.kernel.org/r/20160817153704.GC29724@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bc06f00d
kernel/events/uprobes.c

@@ -150,7 +150,7 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
  * Returns 0 on success, -EFAULT on failure.
  */
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
-                          struct page *page, struct page *kpage)
+                          struct page *old_page, struct page *new_page)
 {
         struct mm_struct *mm = vma->vm_mm;
         spinlock_t *ptl;
@@ -161,49 +161,49 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
         const unsigned long mmun_end = addr + PAGE_SIZE;
         struct mem_cgroup *memcg;
 
-        err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg,
+        err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
                         false);
         if (err)
                 return err;
 
         /* For try_to_free_swap() and munlock_vma_page() below */
-        lock_page(page);
+        lock_page(old_page);
 
         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
         err = -EAGAIN;
-        ptep = page_check_address(page, mm, addr, &ptl, 0);
+        ptep = page_check_address(old_page, mm, addr, &ptl, 0);
         if (!ptep) {
-                mem_cgroup_cancel_charge(kpage, memcg, false);
+                mem_cgroup_cancel_charge(new_page, memcg, false);
                 goto unlock;
         }
 
-        get_page(kpage);
-        page_add_new_anon_rmap(kpage, vma, addr, false);
-        mem_cgroup_commit_charge(kpage, memcg, false, false);
-        lru_cache_add_active_or_unevictable(kpage, vma);
+        get_page(new_page);
+        page_add_new_anon_rmap(new_page, vma, addr, false);
+        mem_cgroup_commit_charge(new_page, memcg, false, false);
+        lru_cache_add_active_or_unevictable(new_page, vma);
 
-        if (!PageAnon(page)) {
-                dec_mm_counter(mm, mm_counter_file(page));
+        if (!PageAnon(old_page)) {
+                dec_mm_counter(mm, mm_counter_file(old_page));
                 inc_mm_counter(mm, MM_ANONPAGES);
         }
 
         flush_cache_page(vma, addr, pte_pfn(*ptep));
         ptep_clear_flush_notify(vma, addr, ptep);
-        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
+        set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
 
-        page_remove_rmap(page, false);
-        if (!page_mapped(page))
-                try_to_free_swap(page);
+        page_remove_rmap(old_page, false);
+        if (!page_mapped(old_page))
+                try_to_free_swap(old_page);
         pte_unmap_unlock(ptep, ptl);
 
         if (vma->vm_flags & VM_LOCKED)
-                munlock_vma_page(page);
-        put_page(page);
+                munlock_vma_page(old_page);
+        put_page(old_page);
 
         err = 0;
  unlock:
         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-        unlock_page(page);
+        unlock_page(old_page);
 
         return err;
 }
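For context on why old_page/new_page "matches the caller": the caller pins the page
currently mapped at the probed address, allocates a fresh anonymous page, copies the
original contents into it, patches in the breakpoint opcode, and only then asks
__replace_page() to swap the mapping. The sketch below is a simplified, hypothetical
reconstruction of that calling pattern (modeled on uprobe_write_opcode() in
kernel/events/uprobes.c); get_old_page() is an invented placeholder for the real
get_user_pages() lookup, and retries, error paths, and opcode verification are elided.

/*
 * Hypothetical sketch only -- not the code touched by this commit.
 * get_old_page() stands in for the real get_user_pages() lookup.
 */
static int write_opcode_sketch(struct mm_struct *mm, unsigned long vaddr,
                               uprobe_opcode_t opcode)
{
        struct vm_area_struct *vma;
        struct page *old_page, *new_page;
        int ret;

        /* Pin the page that currently backs vaddr (invented helper). */
        ret = get_old_page(mm, vaddr, &vma, &old_page);
        if (ret)
                return ret;

        /* Allocate the replacement and copy the old contents into it. */
        new_page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);
        if (!new_page) {
                put_page(old_page);
                return -ENOMEM;
        }
        copy_highpage(new_page, old_page);

        /* Patch the breakpoint instruction into the copy. */
        copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

        /* Swap the mapping: new_page replaces old_page at vaddr. */
        ret = __replace_page(vma, vaddr, old_page, new_page);

        put_page(new_page);
        put_page(old_page);
        return ret;
}

With the rename, the __replace_page() signature reads the same way the caller thinks
about the operation: the pinned old_page goes out, the freshly patched new_page goes in.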