Commit 7a7256d5 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

shmem: convert shmem_mfill_atomic_pte() to use a folio

Assert that this is a single-page folio as there are several assumptions
in here that it's exactly PAGE_SIZE bytes large.  Saves several calls to
compound_head() and removes the last caller of shmem_alloc_page().

Link: https://lkml.kernel.org/r/20220902194653.1739778-18-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 65995918
...@@ -2374,12 +2374,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir, ...@@ -2374,12 +2374,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
} }
#ifdef CONFIG_USERFAULTFD #ifdef CONFIG_USERFAULTFD
/*
 * shmem_alloc_page - allocate a single shmem page for @index.
 * @gfp:   allocation flags forwarded to shmem_alloc_folio().
 * @info:  shmem inode info for the owning file.
 * @index: page cache index the page is intended for.
 *
 * Thin compatibility wrapper: allocates a folio and hands back its
 * embedded struct page.
 *
 * NOTE(review): if shmem_alloc_folio() returns NULL, &folio->page is
 * only NULL (rather than UB) because page is the first member of
 * struct folio — presumably relied upon here; confirm against the
 * folio definition. Callers appear to check the result for NULL.
 */
static struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
return &shmem_alloc_folio(gfp, info, index)->page;
}
int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
pmd_t *dst_pmd, pmd_t *dst_pmd,
struct vm_area_struct *dst_vma, struct vm_area_struct *dst_vma,
...@@ -2395,7 +2389,6 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, ...@@ -2395,7 +2389,6 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
void *page_kaddr; void *page_kaddr;
struct folio *folio; struct folio *folio;
struct page *page;
int ret; int ret;
pgoff_t max_off; pgoff_t max_off;
...@@ -2414,53 +2407,53 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, ...@@ -2414,53 +2407,53 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
if (!*pagep) { if (!*pagep) {
ret = -ENOMEM; ret = -ENOMEM;
page = shmem_alloc_page(gfp, info, pgoff); folio = shmem_alloc_folio(gfp, info, pgoff);
if (!page) if (!folio)
goto out_unacct_blocks; goto out_unacct_blocks;
if (!zeropage) { /* COPY */ if (!zeropage) { /* COPY */
page_kaddr = kmap_atomic(page); page_kaddr = kmap_local_folio(folio, 0);
ret = copy_from_user(page_kaddr, ret = copy_from_user(page_kaddr,
(const void __user *)src_addr, (const void __user *)src_addr,
PAGE_SIZE); PAGE_SIZE);
kunmap_atomic(page_kaddr); kunmap_local(page_kaddr);
/* fallback to copy_from_user outside mmap_lock */ /* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) { if (unlikely(ret)) {
*pagep = page; *pagep = &folio->page;
ret = -ENOENT; ret = -ENOENT;
/* don't free the page */ /* don't free the page */
goto out_unacct_blocks; goto out_unacct_blocks;
} }
flush_dcache_page(page); flush_dcache_folio(folio);
} else { /* ZEROPAGE */ } else { /* ZEROPAGE */
clear_user_highpage(page, dst_addr); clear_user_highpage(&folio->page, dst_addr);
} }
} else { } else {
page = *pagep; folio = page_folio(*pagep);
VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
*pagep = NULL; *pagep = NULL;
} }
VM_BUG_ON(PageLocked(page)); VM_BUG_ON(folio_test_locked(folio));
VM_BUG_ON(PageSwapBacked(page)); VM_BUG_ON(folio_test_swapbacked(folio));
__SetPageLocked(page); __folio_set_locked(folio);
__SetPageSwapBacked(page); __folio_set_swapbacked(folio);
__SetPageUptodate(page); __folio_mark_uptodate(folio);
ret = -EFAULT; ret = -EFAULT;
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(pgoff >= max_off)) if (unlikely(pgoff >= max_off))
goto out_release; goto out_release;
folio = page_folio(page);
ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
gfp & GFP_RECLAIM_MASK, dst_mm); gfp & GFP_RECLAIM_MASK, dst_mm);
if (ret) if (ret)
goto out_release; goto out_release;
ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
page, true, wp_copy); &folio->page, true, wp_copy);
if (ret) if (ret)
goto out_delete_from_cache; goto out_delete_from_cache;
...@@ -2470,13 +2463,13 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, ...@@ -2470,13 +2463,13 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
shmem_recalc_inode(inode); shmem_recalc_inode(inode);
spin_unlock_irq(&info->lock); spin_unlock_irq(&info->lock);
unlock_page(page); folio_unlock(folio);
return 0; return 0;
out_delete_from_cache: out_delete_from_cache:
delete_from_page_cache(page); filemap_remove_folio(folio);
out_release: out_release:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
out_unacct_blocks: out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1); shmem_inode_unacct_blocks(inode, 1);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment