Commit 595cd8f2 authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/ksm: handle protnone saved writes when making page write protect

Without this, KSM will consider the page write-protected, but a NUMA
hinting fault can later mark the page writable again.  This can result
in memory corruption.

Link: http://lkml.kernel.org/r/1487498625-10891-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 288bc549
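
The fix is easiest to read as a single predicate. On architectures where NUMA hinting (PROT_NONE) protection makes the hardware write bit unusable, the original write permission is stashed in a "saved write" bit, and write_protect_page() must treat such a PTE as still writable. A minimal sketch of the condition the patch adds below (the helper name is invented for illustration; the kernel open-codes the test in the if statement):

#include <linux/mm.h>	/* pte_t and the pte_*() helpers via asm/pgtable.h */

/*
 * Illustrative helper, not from the kernel tree: write_protect_page()
 * must take the write-protect path if the PTE is writable in hardware,
 * dirty, or a NUMA-hinting (protnone) PTE whose write permission was
 * saved when the protection was applied.
 */
static inline bool ksm_pte_needs_wrprotect(pte_t pte)
{
	return pte_write(pte) || pte_dirty(pte) ||
	       (pte_protnone(pte) && pte_savedwrite(pte));
}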
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -233,6 +233,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pte_mk_savedwrite pte_mkwrite
 #endif
 
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
 #ifndef pmd_savedwrite
 #define pmd_savedwrite pmd_write
 #endif
@@ -241,6 +245,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pmd_mk_savedwrite pmd_mkwrite
 #endif
 
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
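
On architectures with no separate saved-write bit, the fallbacks above make the new hook degenerate to plain write protection, so behaviour there is unchanged. An architecture that does stash the write bit overrides the hooks instead; the sketch below is a hypothetical override (the bit name _PAGE_SAVED_WRITE and its position are invented here, not taken from any real port; powerpc carries the real implementation in its own pgtable headers):

/*
 * Hypothetical override: track the pre-PROT_NONE write permission in a
 * spare software PTE bit so it survives while the hardware write bit
 * is turned off.
 */
#define _PAGE_SAVED_WRITE	(1UL << 57)	/* assumed free software bit */

#define pte_savedwrite pte_savedwrite	/* suppress the generic fallback */
static inline int pte_savedwrite(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SAVED_WRITE);
}

#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SAVED_WRITE);
}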
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -880,7 +880,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
@@ -905,6 +906,10 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
-		entry = pte_mkclean(pte_wrprotect(entry));
+
+		if (pte_protnone(entry))
+			entry = pte_mkclean(pte_clear_savedwrite(entry));
+		else
+			entry = pte_mkclean(pte_wrprotect(entry));
 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
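
Why clearing the saved write bit, rather than only the hardware write bit, is what prevents the corruption: the parent commit 288bc549 made the NUMA hinting fault restore writability from pte_savedwrite(). A simplified sketch of the relevant do_numa_page() logic from kernels of this era (locking, TLB flushing, and statistics omitted):

/*
 * Roughly what mm/memory.c:do_numa_page() does when the hinting fault
 * is resolved: write permission is restored from the saved-write state,
 * not from the (cleared) hardware write bit.
 */
bool was_writable = pte_savedwrite(vmf->orig_pte);
pte_t pte = pte_modify(vmf->orig_pte, vma->vm_page_prot);

pte = pte_mkyoung(pte);
if (was_writable)
	pte = pte_mkwrite(pte);
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);

With the KSM change above, a merged page reaches this path with saved write already cleared, so was_writable is false and the fault can no longer hand back a writable mapping to a page KSM considers stable.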