filemap: Convert filemap_fault to folio

Instead of converting back-and-forth between the actual page and
the head page, just convert once at the end of the function where we
set the vmf->page.  Saves 241 bytes of text, or 15% of the size of
filemap_fault().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent 79598ced
...@@ -2898,21 +2898,20 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, ...@@ -2898,21 +2898,20 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#define MMAP_LOTSAMISS (100) #define MMAP_LOTSAMISS (100)
/* /*
* lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
* @vmf - the vm_fault for this fault. * @vmf - the vm_fault for this fault.
* @page - the page to lock. * @folio - the folio to lock.
* @fpin - the pointer to the file we may pin (or is already pinned). * @fpin - the pointer to the file we may pin (or is already pinned).
* *
* This works similar to lock_page_or_retry in that it can drop the mmap_lock. * This works similar to lock_folio_or_retry in that it can drop the
* It differs in that it actually returns the page locked if it returns 1 and 0 * mmap_lock. It differs in that it actually returns the folio locked
* if it couldn't lock the page. If we did have to drop the mmap_lock then fpin * if it returns 1 and 0 if it couldn't lock the folio. If we did have
* will point to the pinned file and needs to be fput()'ed at a later point. * to drop the mmap_lock then fpin will point to the pinned file and
* needs to be fput()'ed at a later point.
*/ */
static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
struct file **fpin) struct file **fpin)
{ {
struct folio *folio = page_folio(page);
if (folio_trylock(folio)) if (folio_trylock(folio))
return 1; return 1;
...@@ -3038,7 +3037,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf, ...@@ -3038,7 +3037,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
* vma->vm_mm->mmap_lock must be held on entry. * vma->vm_mm->mmap_lock must be held on entry.
* *
* If our return value has VM_FAULT_RETRY set, it's because the mmap_lock * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
* may be dropped before doing I/O or by lock_page_maybe_drop_mmap(). * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
* *
* If our return value does not have VM_FAULT_RETRY set, the mmap_lock * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
* has not been released. * has not been released.
...@@ -3054,29 +3053,27 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3054,29 +3053,27 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
struct file *fpin = NULL; struct file *fpin = NULL;
struct address_space *mapping = file->f_mapping; struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
pgoff_t offset = vmf->pgoff; pgoff_t max_idx, index = vmf->pgoff;
pgoff_t max_off; struct folio *folio;
struct page *page;
vm_fault_t ret = 0; vm_fault_t ret = 0;
bool mapping_locked = false; bool mapping_locked = false;
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(offset >= max_off)) if (unlikely(index >= max_idx))
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
/* /*
* Do we have something in the page cache already? * Do we have something in the page cache already?
*/ */
page = find_get_page(mapping, offset); folio = filemap_get_folio(mapping, index);
if (likely(page)) { if (likely(folio)) {
struct folio *folio = page_folio(page);
/* /*
* We found the page, so try async readahead before waiting for * We found the page, so try async readahead before waiting for
* the lock. * the lock.
*/ */
if (!(vmf->flags & FAULT_FLAG_TRIED)) if (!(vmf->flags & FAULT_FLAG_TRIED))
fpin = do_async_mmap_readahead(vmf, folio); fpin = do_async_mmap_readahead(vmf, folio);
if (unlikely(!PageUptodate(page))) { if (unlikely(!folio_test_uptodate(folio))) {
filemap_invalidate_lock_shared(mapping); filemap_invalidate_lock_shared(mapping);
mapping_locked = true; mapping_locked = true;
} }
...@@ -3088,17 +3085,17 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3088,17 +3085,17 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
fpin = do_sync_mmap_readahead(vmf); fpin = do_sync_mmap_readahead(vmf);
retry_find: retry_find:
/* /*
* See comment in filemap_create_page() why we need * See comment in filemap_create_folio() why we need
* invalidate_lock * invalidate_lock
*/ */
if (!mapping_locked) { if (!mapping_locked) {
filemap_invalidate_lock_shared(mapping); filemap_invalidate_lock_shared(mapping);
mapping_locked = true; mapping_locked = true;
} }
page = pagecache_get_page(mapping, offset, folio = __filemap_get_folio(mapping, index,
FGP_CREAT|FGP_FOR_MMAP, FGP_CREAT|FGP_FOR_MMAP,
vmf->gfp_mask); vmf->gfp_mask);
if (!page) { if (!folio) {
if (fpin) if (fpin)
goto out_retry; goto out_retry;
filemap_invalidate_unlock_shared(mapping); filemap_invalidate_unlock_shared(mapping);
...@@ -3106,22 +3103,22 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3106,22 +3103,22 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
} }
} }
if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
goto out_retry; goto out_retry;
/* Did it get truncated? */ /* Did it get truncated? */
if (unlikely(compound_head(page)->mapping != mapping)) { if (unlikely(folio->mapping != mapping)) {
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
goto retry_find; goto retry_find;
} }
VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
/* /*
* We have a locked page in the page cache, now we need to check * We have a locked page in the page cache, now we need to check
* that it's up-to-date. If not, it is going to be due to an error. * that it's up-to-date. If not, it is going to be due to an error.
*/ */
if (unlikely(!PageUptodate(page))) { if (unlikely(!folio_test_uptodate(folio))) {
/* /*
* The page was in cache and uptodate and now it is not. * The page was in cache and uptodate and now it is not.
* Strange but possible since we didn't hold the page lock all * Strange but possible since we didn't hold the page lock all
...@@ -3129,8 +3126,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3129,8 +3126,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* try again. * try again.
*/ */
if (!mapping_locked) { if (!mapping_locked) {
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
goto retry_find; goto retry_find;
} }
goto page_not_uptodate; goto page_not_uptodate;
...@@ -3142,7 +3139,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3142,7 +3139,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* redo the fault. * redo the fault.
*/ */
if (fpin) { if (fpin) {
unlock_page(page); folio_unlock(folio);
goto out_retry; goto out_retry;
} }
if (mapping_locked) if (mapping_locked)
...@@ -3152,14 +3149,14 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3152,14 +3149,14 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* Found the page and have a reference on it. * Found the page and have a reference on it.
* We must recheck i_size under page lock. * We must recheck i_size under page lock.
*/ */
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(offset >= max_off)) { if (unlikely(index >= max_idx)) {
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
vmf->page = page; vmf->page = folio_file_page(folio, index);
return ret | VM_FAULT_LOCKED; return ret | VM_FAULT_LOCKED;
page_not_uptodate: page_not_uptodate:
...@@ -3170,10 +3167,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3170,10 +3167,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* and we need to check for errors. * and we need to check for errors.
*/ */
fpin = maybe_unlock_mmap_for_io(vmf, fpin); fpin = maybe_unlock_mmap_for_io(vmf, fpin);
error = filemap_read_folio(file, mapping, page_folio(page)); error = filemap_read_folio(file, mapping, folio);
if (fpin) if (fpin)
goto out_retry; goto out_retry;
put_page(page); folio_put(folio);
if (!error || error == AOP_TRUNCATED_PAGE) if (!error || error == AOP_TRUNCATED_PAGE)
goto retry_find; goto retry_find;
...@@ -3187,8 +3184,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) ...@@ -3187,8 +3184,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* re-find the vma and come back and find our hopefully still populated * re-find the vma and come back and find our hopefully still populated
* page. * page.
*/ */
if (page) if (folio)
put_page(page); folio_put(folio);
if (mapping_locked) if (mapping_locked)
filemap_invalidate_unlock_shared(mapping); filemap_invalidate_unlock_shared(mapping);
if (fpin) if (fpin)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment