Commit 3c98a41c authored by Matthew Wilcox (Oracle), committed by Andrew Morton

buffer: convert grow_dev_page() to use a folio

Get a folio from the page cache instead of a page, then use the folio API
throughout.  Removes a few calls to compound_head() and may be needed to
support block size > PAGE_SIZE.

Link: https://lkml.kernel.org/r/20230612210141.730128-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4a9622f2
...@@ -976,7 +976,7 @@ grow_dev_page(struct block_device *bdev, sector_t block, ...@@ -976,7 +976,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
pgoff_t index, int size, int sizebits, gfp_t gfp) pgoff_t index, int size, int sizebits, gfp_t gfp)
{ {
struct inode *inode = bdev->bd_inode; struct inode *inode = bdev->bd_inode;
struct page *page; struct folio *folio;
struct buffer_head *bh; struct buffer_head *bh;
sector_t end_block; sector_t end_block;
int ret = 0; int ret = 0;
...@@ -992,42 +992,38 @@ grow_dev_page(struct block_device *bdev, sector_t block, ...@@ -992,42 +992,38 @@ grow_dev_page(struct block_device *bdev, sector_t block,
*/ */
gfp_mask |= __GFP_NOFAIL; gfp_mask |= __GFP_NOFAIL;
page = find_or_create_page(inode->i_mapping, index, gfp_mask); folio = __filemap_get_folio(inode->i_mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);
BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) { bh = folio_buffers(folio);
bh = page_buffers(page); if (bh) {
if (bh->b_size == size) { if (bh->b_size == size) {
end_block = init_page_buffers(page, bdev, end_block = init_page_buffers(&folio->page, bdev,
(sector_t)index << sizebits, (sector_t)index << sizebits,
size); size);
goto done; goto done;
} }
if (!try_to_free_buffers(page_folio(page))) if (!try_to_free_buffers(folio))
goto failed; goto failed;
} }
/* bh = folio_alloc_buffers(folio, size, true);
* Allocate some buffers for this page
*/
bh = alloc_page_buffers(page, size, true);
/* /*
* Link the page to the buffers and initialise them. Take the * Link the folio to the buffers and initialise them. Take the
* lock to be atomic wrt __find_get_block(), which does not * lock to be atomic wrt __find_get_block(), which does not
* run under the page lock. * run under the folio lock.
*/ */
spin_lock(&inode->i_mapping->private_lock); spin_lock(&inode->i_mapping->private_lock);
link_dev_buffers(page, bh); link_dev_buffers(&folio->page, bh);
end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, end_block = init_page_buffers(&folio->page, bdev,
size); (sector_t)index << sizebits, size);
spin_unlock(&inode->i_mapping->private_lock); spin_unlock(&inode->i_mapping->private_lock);
done: done:
ret = (block < end_block) ? 1 : -ENXIO; ret = (block < end_block) ? 1 : -ENXIO;
failed: failed:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment