Commit 8e2e1756 authored by Pankaj Raghav, committed by Andrew Morton

fs/buffer: add folio_create_empty_buffers helper

Folio version of create_empty_buffers().  This is required to convert
create_page_buffers() to folio_create_buffers() later in the series.

It removes several calls to compound_head() as it works directly on the
folio, unlike create_empty_buffers().  Hence, create_empty_buffers() has
been modified to call folio_create_empty_buffers().

Link: https://lkml.kernel.org/r/20230417123618.22094-4-p.raghav@samsung.com
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c71124a8
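
For context, here is a minimal sketch (not part of this patch) of how a caller could use the new helper. example_create_buffers() is a hypothetical name; folio_buffers() is the existing accessor for a folio's attached buffer_heads, and the pattern mirrors what create_page_buffers()/folio_create_buffers() do later in the series.

#include <linux/buffer_head.h>

/*
 * Hypothetical helper (illustration only): make sure the folio has
 * buffer_heads of the given block size attached, then return the head
 * of the circular buffer_head list.
 */
static struct buffer_head *example_create_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *head = folio_buffers(folio);

	if (!head) {
		folio_create_empty_buffers(folio, blocksize, b_state);
		head = folio_buffers(folio);
	}
	return head;
}

Because folio_create_empty_buffers() takes the folio directly, nothing in this path needs compound_head() on a tail page, which is the saving the commit message refers to.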
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1594,18 +1594,17 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 }
 EXPORT_SYMBOL(block_invalidate_folio);
 
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * block_dirty_folio() via private_lock.  try_to_free_buffers
- * is already excluded via the page lock.
+ * is already excluded via the folio lock.
  */
-void create_empty_buffers(struct page *page,
-			unsigned long blocksize, unsigned long b_state)
+void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
+				unsigned long b_state)
 {
 	struct buffer_head *bh, *head, *tail;
 
-	head = alloc_page_buffers(page, blocksize, true);
+	head = folio_alloc_buffers(folio, blocksize, true);
 	bh = head;
 	do {
 		bh->b_state |= b_state;
@@ -1614,19 +1613,26 @@ void create_empty_buffers(struct page *page,
 	} while (bh);
 	tail->b_this_page = head;
 
-	spin_lock(&page->mapping->private_lock);
-	if (PageUptodate(page) || PageDirty(page)) {
+	spin_lock(&folio->mapping->private_lock);
+	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
 		bh = head;
 		do {
-			if (PageDirty(page))
+			if (folio_test_dirty(folio))
 				set_buffer_dirty(bh);
-			if (PageUptodate(page))
+			if (folio_test_uptodate(folio))
 				set_buffer_uptodate(bh);
 			bh = bh->b_this_page;
 		} while (bh != head);
 	}
-	attach_page_private(page, head);
-	spin_unlock(&page->mapping->private_lock);
+	folio_attach_private(folio, head);
+	spin_unlock(&folio->mapping->private_lock);
+}
+EXPORT_SYMBOL(folio_create_empty_buffers);
+
+void create_empty_buffers(struct page *page,
+			unsigned long blocksize, unsigned long b_state)
+{
+	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
 }
 EXPORT_SYMBOL(create_empty_buffers);
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,6 +205,8 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 		bool retry);
 void create_empty_buffers(struct page *, unsigned long,
 			unsigned long b_state);
+void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
+				unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_async_write(struct buffer_head *bh, int uptodate);