Commit 14059f66 authored by Matthew Wilcox (Oracle)'s avatar Matthew Wilcox (Oracle) Committed by Andrew Morton

fs: remove the bh_end_io argument from __block_write_full_folio

All callers are passing end_buffer_async_write as this argument, so we can
hardcode references to it within __block_write_full_folio().  That lets us
make end_buffer_async_write() static.

Link: https://lkml.kernel.org/r/20231215200245.748418-15-willy@infradead.org
Signed-off-by: default avatarMatthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: default avatarJens Axboe <axboe@kernel.dk>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 17bf23a9
...@@ -372,10 +372,10 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) ...@@ -372,10 +372,10 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
} }
/* /*
* Completion handler for block_write_full_folio() - pages which are unlocked * Completion handler for block_write_full_folio() - folios which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion. * during I/O, and which have the writeback flag cleared upon I/O completion.
*/ */
void end_buffer_async_write(struct buffer_head *bh, int uptodate) static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{ {
unsigned long flags; unsigned long flags;
struct buffer_head *first; struct buffer_head *first;
...@@ -415,7 +415,6 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) ...@@ -415,7 +415,6 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
spin_unlock_irqrestore(&first->b_uptodate_lock, flags); spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
return; return;
} }
EXPORT_SYMBOL(end_buffer_async_write);
/* /*
* If a page's buffers are under async readin (end_buffer_async_read * If a page's buffers are under async readin (end_buffer_async_read
...@@ -1787,8 +1786,7 @@ static struct buffer_head *folio_create_buffers(struct folio *folio, ...@@ -1787,8 +1786,7 @@ static struct buffer_head *folio_create_buffers(struct folio *folio,
* causes the writes to be flagged as synchronous writes. * causes the writes to be flagged as synchronous writes.
*/ */
int __block_write_full_folio(struct inode *inode, struct folio *folio, int __block_write_full_folio(struct inode *inode, struct folio *folio,
get_block_t *get_block, struct writeback_control *wbc, get_block_t *get_block, struct writeback_control *wbc)
bh_end_io_t *handler)
{ {
int err; int err;
sector_t block; sector_t block;
...@@ -1867,7 +1865,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio, ...@@ -1867,7 +1865,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
continue; continue;
} }
if (test_clear_buffer_dirty(bh)) { if (test_clear_buffer_dirty(bh)) {
mark_buffer_async_write_endio(bh, handler); mark_buffer_async_write_endio(bh,
end_buffer_async_write);
} else { } else {
unlock_buffer(bh); unlock_buffer(bh);
} }
...@@ -1920,7 +1919,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio, ...@@ -1920,7 +1919,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
if (buffer_mapped(bh) && buffer_dirty(bh) && if (buffer_mapped(bh) && buffer_dirty(bh) &&
!buffer_delay(bh)) { !buffer_delay(bh)) {
lock_buffer(bh); lock_buffer(bh);
mark_buffer_async_write_endio(bh, handler); mark_buffer_async_write_endio(bh,
end_buffer_async_write);
} else { } else {
/* /*
* The buffer may have been set dirty during * The buffer may have been set dirty during
...@@ -2704,8 +2704,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, ...@@ -2704,8 +2704,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
/* Is the folio fully inside i_size? */ /* Is the folio fully inside i_size? */
if (folio_pos(folio) + folio_size(folio) <= i_size) if (folio_pos(folio) + folio_size(folio) <= i_size)
return __block_write_full_folio(inode, folio, get_block, wbc, return __block_write_full_folio(inode, folio, get_block, wbc);
end_buffer_async_write);
/* Is the folio fully outside i_size? (truncate in progress) */ /* Is the folio fully outside i_size? (truncate in progress) */
if (folio_pos(folio) >= i_size) { if (folio_pos(folio) >= i_size) {
...@@ -2722,8 +2721,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, ...@@ -2722,8 +2721,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
*/ */
folio_zero_segment(folio, offset_in_folio(folio, i_size), folio_zero_segment(folio, offset_in_folio(folio, i_size),
folio_size(folio)); folio_size(folio));
return __block_write_full_folio(inode, folio, get_block, wbc, return __block_write_full_folio(inode, folio, get_block, wbc);
end_buffer_async_write);
} }
sector_t generic_block_bmap(struct address_space *mapping, sector_t block, sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
......
...@@ -108,7 +108,7 @@ static int gfs2_write_jdata_folio(struct folio *folio, ...@@ -108,7 +108,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
folio_size(folio)); folio_size(folio));
return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc, return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
wbc, end_buffer_async_write); wbc);
} }
/** /**
......
...@@ -205,7 +205,6 @@ struct buffer_head *create_empty_buffers(struct folio *folio, ...@@ -205,7 +205,6 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state); unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */ /* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
...@@ -255,8 +254,7 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length); ...@@ -255,8 +254,7 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
void *get_block); void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio, int __block_write_full_folio(struct inode *inode, struct folio *folio,
get_block_t *get_block, struct writeback_control *wbc, get_block_t *get_block, struct writeback_control *wbc);
bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *); int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment