Commit c03544dd authored by Andrew Morton, committed by David S. Miller

[PATCH] prevent ptrace from altering page permissions

From: Roland McGrath <roland@redhat.com>

Under some circumstances, ptrace PEEK/POKE_TEXT can cause page permissions
to be permanently changed.  This causes changes in application behaviour
when run under gdb.

Fix that by only marking the pte as writeable if the vma is marked for
writing.  A write fault thus unshares the page but doesn't necessarily make
it writeable.
parent a177d62c
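
To make the failure mode concrete, here is a minimal userspace sketch of the kind of access that triggers it: a tracer poking a word back into the tracee's text segment, the way gdb plants and removes breakpoints. It is not part of the patch; it only uses the standard Linux ptrace(2) interface, and the error handling is pared down for brevity.

#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Tracee: let the parent trace us, then stop. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	/* Wait for the child to stop under ptrace. */
	waitpid(child, NULL, 0);

	/*
	 * Read a word from the child's text segment and write the same
	 * word straight back.  On the kernel side this goes through
	 * access_process_vm() -> get_user_pages() with force = 1, i.e. a
	 * write fault against a mapping that was never PROT_WRITE.
	 */
	errno = 0;
	long word = ptrace(PTRACE_PEEKTEXT, child, (void *)main, NULL);
	if (errno == 0)
		ptrace(PTRACE_POKETEXT, child, (void *)main, (void *)word);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

Before the fix, the copy-on-write page created by such a poke could be left with a writable pte, so a stray write by the debugged program itself would later succeed where it should have faulted; with the fix the pte stays read-only, as the commit message describes.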
mm/memory.c
@@ -748,7 +748,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	spin_lock(&mm->page_table_lock);
 	do {
 		struct page *map;
-		while (!(map = follow_page(mm, start, write))) {
+		int lookup_write = write;
+		while (!(map = follow_page(mm, start, lookup_write))) {
 			spin_unlock(&mm->page_table_lock);
 			switch (handle_mm_fault(mm,vma,start,write)) {
 			case VM_FAULT_MINOR:
@@ -764,6 +765,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			default:
 				BUG();
 			}
+			/*
+			 * Now that we have performed a write fault
+			 * and surely no longer have a shared page we
+			 * shouldn't write, we shouldn't ignore an
+			 * unwritable page in the page table if
+			 * we are forcing write access.
+			 */
+			lookup_write = write && !force;
 			spin_lock(&mm->page_table_lock);
 		}
 		if (pages) {
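
The comment above explains why lookup_write is cleared once a forced write has been faulted in. The two halves of the patch depend on each other: with maybe_mkwrite() (added below) keeping the pte read-only, follow_page() would never find a writable pte, so get_user_pages() must also stop asking for one after the fault. A small userspace model of the retry loop makes this visible; lookup_page(), fault_in() and struct fake_pte are made-up stand-ins for follow_page(), handle_mm_fault() and the real page tables, purely for illustration.

#include <stdbool.h>
#include <stdio.h>

struct fake_pte {
	bool present;
	bool writable;
};

/* Stand-in for follow_page(): succeeds only if the pte satisfies the lookup. */
static bool lookup_page(const struct fake_pte *pte, bool want_write)
{
	return pte->present && (!want_write || pte->writable);
}

/*
 * Stand-in for handle_mm_fault() taking a write fault against a vma that
 * lacks VM_WRITE: with maybe_mkwrite() the freshly unshared page is
 * installed with a read-only pte.
 */
static void fault_in(struct fake_pte *pte)
{
	pte->present = true;
	pte->writable = false;
}

int main(void)
{
	struct fake_pte pte = { false, false };
	bool write = true, force = true;	/* a ptrace-style forced write */
	bool lookup_write = write;
	int faults = 0;

	while (!lookup_page(&pte, lookup_write)) {
		fault_in(&pte);
		faults++;
		/*
		 * The page is now private to this mm, so stop insisting on
		 * a writable pte.  If lookup_write stayed equal to write,
		 * the read-only pte installed above would never satisfy the
		 * lookup and this loop would retry forever.
		 */
		lookup_write = write && !force;
	}
	printf("page found after %d fault(s); pte left read-only\n", faults);
	return 0;
}

Run as is, the model reports that the page is found after one fault with the pte still read-only; changing the last assignment back to lookup_write = write turns the loop into an endless retry, which is exactly what the new variable avoids.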
@@ -952,6 +961,19 @@ int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned lo
 
 EXPORT_SYMBOL(remap_page_range);
 
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
+ * servicing faults for write access.  In the normal case, do always want
+ * pte_mkwrite.  But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pte = pte_mkwrite(pte);
+	return pte;
+}
+
 /*
  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
  */
@@ -961,7 +983,8 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * new_page
 	pte_t entry;
 
 	flush_cache_page(vma, address);
-	entry = pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)));
+	entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
+			      vma);
 	ptep_establish(vma, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
 }
@@ -1013,7 +1036,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 		unlock_page(old_page);
 		if (reuse) {
 			flush_cache_page(vma, address);
-			entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
+			entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
+					      vma);
 			ptep_establish(vma, address, page_table, entry);
 			update_mmu_cache(vma, address, entry);
 			pte_unmap(page_table);
@@ -1281,7 +1305,7 @@ static int do_swap_page(struct mm_struct * mm,
 	mm->rss++;
 	pte = mk_pte(page, vma->vm_page_prot);
 	if (write_access && can_share_swap_page(page))
-		pte = pte_mkdirty(pte_mkwrite(pte));
+		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 	unlock_page(page);
 
 	flush_icache_page(vma, page);
@@ -1348,7 +1372,9 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out;
 		}
 		mm->rss++;
-		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
+							 vma->vm_page_prot)),
+				      vma);
 		lru_cache_add_active(page);
 		mark_page_accessed(page);
 	}
@@ -1464,7 +1490,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	flush_icache_page(vma, new_page);
 	entry = mk_pte(new_page, vma->vm_page_prot);
 	if (write_access)
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	set_pte(page_table, entry);
 	pte_chain = page_add_rmap(new_page, page_table, pte_chain);
 	pte_unmap(page_table);