Commit fd3b1bc3 authored by Pavankumar Kondeti, committed by Andrew Morton

mm/madvise: fix madvise_pageout for private file mappings

When MADV_PAGEOUT is called on a private file mapping VMA region, we bail
out early if the process is neither the owner of the file nor write
capable of it.  However, such a VMA may contain both private/shared clean
pages and private dirty pages, and the opportunity to page out the private
dirty pages (anon pages) is missed.  Fix this behavior by letting pageout
proceed for private file mappings and performing the file access check
together with PageAnon() during the page walk.

We observe ~10% improvement in zram usage, thus leaving more available
memory on a 4GB RAM system running Android.
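
To make the fixed scenario concrete, here is a minimal userspace sketch
(illustrative only, not part of this commit; it assumes Linux 5.4+ and libc
headers that define MADV_PAGEOUT, and uses /etc/hosts merely as a stand-in
for any file the caller can read but is neither owner nor write capable of):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
  	/* A file we can read but are neither owner nor write capable of. */
  	int fd = open("/etc/hosts", O_RDONLY);
  	size_t len = (size_t)sysconf(_SC_PAGESIZE);
  	char *p;

  	if (fd < 0)
  		return 1;

  	/* PROT_WRITE on a read-only fd is allowed with MAP_PRIVATE (CoW). */
  	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  	if (p == MAP_FAILED)
  		return 1;

  	p[0] ^= 1;	/* dirty the page: it becomes a private anon CoW page */

  	if (madvise(p, len, MADV_PAGEOUT))
  		perror("madvise(MADV_PAGEOUT)");

  	munmap(p, len);
  	close(fd);
  	return 0;
  }

Before this fix, the madvise() call above returned success without paging
anything out because the up-front can_do_pageout() check rejected the whole
VMA; with the fix, the dirtied CoW page is anon and gets paged out, while
the file's pagecache pages are still skipped by the PageAnon() filter.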

[quic_pkondeti@quicinc.com: v2]
  Link: https://lkml.kernel.org/r/1669962597-27724-1-git-send-email-quic_pkondeti@quicinc.com
Link: https://lkml.kernel.org/r/1667971116-12900-1-git-send-email-quic_pkondeti@quicinc.com
Signed-off-by: Pavankumar Kondeti <quic_pkondeti@quicinc.com>
Cc: Charan Teja Kalla <quic_charante@quicinc.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4c9473e8
@@ -318,6 +318,21 @@ static long madvise_willneed(struct vm_area_struct *vma,
 	return 0;
 }
 
+static inline bool can_do_file_pageout(struct vm_area_struct *vma)
+{
+	if (!vma->vm_file)
+		return false;
+	/*
+	 * paging out pagecache only for non-anonymous mappings that correspond
+	 * to the files the calling process could (if tried) open for writing;
+	 * otherwise we'd be including shared non-exclusive mappings, which
+	 * opens a side channel.
+	 */
+	return inode_owner_or_capable(&init_user_ns,
+				      file_inode(vma->vm_file)) ||
+	       file_permission(vma->vm_file, MAY_WRITE) == 0;
+}
+
 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 				unsigned long addr, unsigned long end,
 				struct mm_walk *walk)
@@ -331,10 +346,14 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 	spinlock_t *ptl;
 	struct page *page = NULL;
 	LIST_HEAD(page_list);
+	bool pageout_anon_only_filter;
 
 	if (fatal_signal_pending(current))
 		return -EINTR;
 
+	pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
+					!can_do_file_pageout(vma);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (pmd_trans_huge(*pmd)) {
 		pmd_t orig_pmd;
@@ -361,6 +380,9 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		if (page_mapcount(page) != 1)
 			goto huge_unlock;
 
+		if (pageout_anon_only_filter && !PageAnon(page))
+			goto huge_unlock;
+
 		if (next - addr != HPAGE_PMD_SIZE) {
 			int err;
 
@@ -429,6 +451,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		if (PageTransCompound(page)) {
 			if (page_mapcount(page) != 1)
 				break;
+			if (pageout_anon_only_filter && !PageAnon(page))
+				break;
 			get_page(page);
 			if (!trylock_page(page)) {
 				put_page(page);
@@ -456,6 +480,9 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		if (!PageLRU(page) || page_mapcount(page) != 1)
 			continue;
 
+		if (pageout_anon_only_filter && !PageAnon(page))
+			continue;
+
 		VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
 		if (pte_young(ptent)) {
@@ -550,23 +577,6 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
 	tlb_end_vma(tlb, vma);
 }
 
-static inline bool can_do_pageout(struct vm_area_struct *vma)
-{
-	if (vma_is_anonymous(vma))
-		return true;
-	if (!vma->vm_file)
-		return false;
-	/*
-	 * paging out pagecache only for non-anonymous mappings that correspond
-	 * to the files the calling process could (if tried) open for writing;
-	 * otherwise we'd be including shared non-exclusive mappings, which
-	 * opens a side channel.
-	 */
-	return inode_owner_or_capable(&init_user_ns,
-				      file_inode(vma->vm_file)) ||
-	       file_permission(vma->vm_file, MAY_WRITE) == 0;
-}
-
 static long madvise_pageout(struct vm_area_struct *vma,
 			struct vm_area_struct **prev,
 			unsigned long start_addr, unsigned long end_addr)
@@ -578,7 +588,14 @@ static long madvise_pageout(struct vm_area_struct *vma,
 	if (!can_madv_lru_vma(vma))
 		return -EINVAL;
 
-	if (!can_do_pageout(vma))
+	/*
+	 * If the VMA belongs to a private file mapping, there can be private
+	 * dirty pages which can be paged out even if this process is neither
+	 * owner nor write capable of the file. We allow such private file
+	 * mappings to proceed so that their dirty anon pages get paged out.
+	 */
+	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
+				(vma->vm_flags & VM_MAYSHARE)))
 		return 0;
 
 	lru_add_drain();