Commit 3ed65f04 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

buffer: hoist GFP flags from grow_dev_page() to __getblk_gfp()

grow_dev_page() is only called by grow_buffers().  grow_buffers() is only
called by __getblk_slow() and __getblk_slow() is only called from
__getblk_gfp(), so it is safe to move the GFP flags setting all the way
up.  With that done, add a new bdev_getblk() entry point that leaves the
GFP flags the way the caller specified them.
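
For illustration (not part of this commit), a hypothetical caller that wants
allocation failures reported to it, rather than the __GFP_NOFAIL looping that
__getblk_gfp() implies, might now write something like:

	#include <linux/buffer_head.h>

	/*
	 * Sketch only: look up or create a buffer_head without blocking.
	 * bdev_getblk() uses the flags exactly as given, so GFP_NOWAIT
	 * makes the allocation fail fast instead of looping.
	 */
	static struct buffer_head *try_getblk(struct block_device *bdev,
			sector_t block, unsigned size)
	{
		return bdev_getblk(bdev, block, size,
				GFP_NOWAIT | __GFP_NOWARN);
	}

The try_getblk() name and flag choice here are illustrative only.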

[willy@infradead.org: fix grow_dev_page() error handling]
  Link: https://lkml.kernel.org/r/ZRREEIwqiy5DijKB@casper.infradead.org
Link: https://lkml.kernel.org/r/20230914150011.843330-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Hui Zhu <teawater@antgroup.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2a418157
fs/buffer.c
@@ -1043,20 +1043,11 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	struct buffer_head *bh;
 	sector_t end_block;
 	int ret = 0;
-	gfp_t gfp_mask;
-
-	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
-
-	/*
-	 * XXX: __getblk_slow() can not really deal with failure and
-	 * will endlessly loop on improvised global reclaim.  Prefer
-	 * looping in the allocator rather than here, at least that
-	 * code knows what it's doing.
-	 */
-	gfp_mask |= __GFP_NOFAIL;
 
 	folio = __filemap_get_folio(inode->i_mapping, index,
-			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);
+			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
 	bh = folio_buffers(folio);
 	if (bh) {
@@ -1069,7 +1060,10 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 		goto failed;
 	}
 
-	bh = folio_alloc_buffers(folio, size, gfp_mask | __GFP_ACCOUNT);
+	ret = -ENOMEM;
+	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
+	if (!bh)
+		goto failed;
 
 	/*
 	 * Link the folio to the buffers and initialise them.  Take the
@@ -1420,24 +1414,49 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
 }
 EXPORT_SYMBOL(__find_get_block);
 
+/**
+ * bdev_getblk - Get a buffer_head in a block device's buffer cache.
+ * @bdev: The block device.
+ * @block: The block number.
+ * @size: The size of buffer_heads for this @bdev.
+ * @gfp: The memory allocation flags to use.
+ *
+ * In contrast to __getblk_gfp(), the @gfp flags must be all of the flags;
+ * they are not augmented with the mapping's GFP flags.
+ *
+ * Return: The buffer head, or NULL if memory could not be allocated.
+ */
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+		unsigned size, gfp_t gfp)
+{
+	struct buffer_head *bh = __find_get_block(bdev, block, size);
+
+	might_alloc(gfp);
+	if (bh)
+		return bh;
+
+	return __getblk_slow(bdev, block, size, gfp);
+}
+EXPORT_SYMBOL(bdev_getblk);
+
 /*
  * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
  * which corresponds to the passed block_device, block and size. The
  * returned buffer has its reference count incremented.
- *
- * __getblk_gfp() will lock up the machine if grow_dev_page's
- * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
  */
 struct buffer_head *
 __getblk_gfp(struct block_device *bdev, sector_t block,
 	     unsigned size, gfp_t gfp)
 {
-	struct buffer_head *bh = __find_get_block(bdev, block, size);
+	gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
 
-	might_sleep();
-	if (bh == NULL)
-		bh = __getblk_slow(bdev, block, size, gfp);
-	return bh;
+	/*
+	 * Prefer looping in the allocator rather than here, at least that
+	 * code knows what it's doing.
+	 */
+	gfp |= __GFP_NOFAIL;
+
+	return bdev_getblk(bdev, block, size, gfp);
 }
 EXPORT_SYMBOL(__getblk_gfp);
include/linux/buffer_head.h
@@ -227,6 +227,8 @@ void __wait_on_buffer(struct buffer_head *);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
 				     unsigned size);
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+		unsigned size, gfp_t gfp);
 struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
 				 unsigned size, gfp_t gfp);
 void __brelse(struct buffer_head *);
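
With this in place, __getblk_gfp() is a thin wrapper around the new entry
point.  As the new __getblk_gfp() body above shows, a call such as

	bh = __getblk_gfp(bdev, block, size, gfp);

now behaves the same as

	bh = bdev_getblk(bdev, block, size,
			gfp | mapping_gfp_constraint(bdev->bd_inode->i_mapping,
						     ~__GFP_FS) | __GFP_NOFAIL);

so bdev_getblk() is the entry point to use when the caller's flags must be
honoured verbatim.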