Commit 5e6e5a12 authored by Hugh Dickins, committed by Linus Torvalds

huge tmpfs: shmem_is_huge(vma, inode, index)

Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
that a consistent set of checks can be applied, even when the inode is
accessed through read/write syscalls (with NULL vma) instead of mmaps (the
index argument is seldom of interest, but required by mount option
"huge=within_size").  Clean up and rearrange the checks a little.

This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes.

Replace a couple of 0s by explicit SHMEM_HUGE_NEVERs; and replace the
obscure !shmem_mapping() symlink check by explicit S_ISLNK() - nothing
else needs that symlink check, so leave it there in shmem_getpage_gfp().
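
S_ISLNK() reads the inode mode directly; it is the same mode-test macro family
that userspace applies to a stat buffer. A trivial userspace demonstration,
with a hypothetical path (illustration only):

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;

            /* lstat() examines the link itself rather than its target */
            if (lstat("/tmp/example-symlink", &st) == 0 && S_ISLNK(st.st_mode))
                    printf("symlink, mode 0%o\n", (unsigned)st.st_mode);
            else
                    printf("not a symlink (or lstat failed)\n");
            return 0;
    }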

Link: https://lkml.kernel.org/r/23a77889-2ddc-b030-75cd-44ca27fd4d1@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent acdd9f8e
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -86,7 +86,12 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(unsigned int type, bool frontswap,
 		       unsigned long *fs_pages_to_unuse);
 
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+extern bool shmem_is_huge(struct vm_area_struct *vma,
+			  struct inode *inode, pgoff_t index);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+{
+	return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
+}
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 					      pgoff_t start, pgoff_t end);
@@ -96,8 +101,6 @@ enum sgp_type {
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_NOALLOC,	/* similar, but fail on hole or use fallocated page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_NOHUGE,	/* like SGP_CACHE, but no huge pages */
-	SGP_HUGE,	/* like SGP_CACHE, huge pages preferred */
 	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
 	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
 };
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -471,39 +471,35 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* ifdef here to avoid bloating shmem.o when not necessary */
 
-static int shmem_huge __read_mostly;
+static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-bool shmem_huge_enabled(struct vm_area_struct *vma)
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
 {
-	struct inode *inode = file_inode(vma->vm_file);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	loff_t i_size;
-	pgoff_t off;
 
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		return false;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		return true;
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return false;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
 		return false;
+	if (shmem_huge == SHMEM_HUGE_FORCE)
+		return true;
+
+	switch (SHMEM_SB(inode->i_sb)->huge) {
 	case SHMEM_HUGE_ALWAYS:
 		return true;
 	case SHMEM_HUGE_WITHIN_SIZE:
-		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
+		index = round_up(index, HPAGE_PMD_NR);
 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-				i_size >> PAGE_SHIFT >= off)
+		if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
-		/* TODO: implement fadvise() hints */
-		return (vma->vm_flags & VM_HUGEPAGE);
+		if (vma && (vma->vm_flags & VM_HUGEPAGE))
+			return true;
+		fallthrough;
 	default:
-		VM_BUG_ON(1);
 		return false;
 	}
 }
@@ -677,6 +673,12 @@ static long shmem_unused_huge_count(struct super_block *sb,
 
 #define shmem_huge SHMEM_HUGE_DENY
 
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
+{
+	return false;
+}
+
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		struct shrink_control *sc, unsigned long nr_to_split)
 {
@@ -1812,7 +1814,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
 	struct page *page;
-	enum sgp_type sgp_huge = sgp;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
 	int error;
@@ -1821,8 +1822,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
 
-	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
-		sgp = SGP_CACHE;
 repeat:
 	if (sgp <= SGP_CACHE &&
 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
@@ -1886,36 +1885,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		return 0;
 	}
 
-	/* shmem_symlink() */
-	if (!shmem_mapping(mapping))
-		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
+	/* Never use a huge page for shmem_symlink() */
+	if (S_ISLNK(inode->i_mode))
 		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		goto alloc_huge;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (!shmem_is_huge(vma, inode, index))
 		goto alloc_nohuge;
-	case SHMEM_HUGE_WITHIN_SIZE: {
-		loff_t i_size;
-		pgoff_t off;
-
-		off = round_up(index, HPAGE_PMD_NR);
-		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-		    i_size >> PAGE_SHIFT >= off)
-			goto alloc_huge;
-
-		fallthrough;
-	}
-	case SHMEM_HUGE_ADVISE:
-		if (sgp_huge == SGP_HUGE)
-			goto alloc_huge;
-		/* TODO: implement fadvise() hints */
-		goto alloc_nohuge;
-	}
 
-alloc_huge:
 	huge_gfp = vma_thp_gfp_mask(vma);
 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
 	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
@@ -2071,7 +2046,6 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = file_inode(vma->vm_file);
 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
-	enum sgp_type sgp;
 	int err;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 
@@ -2134,15 +2108,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 		spin_unlock(&inode->i_lock);
 	}
 
-	sgp = SGP_CACHE;
-
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		sgp = SGP_NOHUGE;
-	else if (vma->vm_flags & VM_HUGEPAGE)
-		sgp = SGP_HUGE;
-
-	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
 				gfp, vma, vmf, &ret);
 	if (err)
 		return vmf_error(err);
@@ -3950,7 +3916,7 @@ int __init shmem_init(void)
 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
-		shmem_huge = 0;	/* just in case it was patched */
+		shmem_huge = SHMEM_HUGE_NEVER;	/* just in case it was patched */
 #endif
 
 	return 0;