Commit ff5710c3 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

nilfs2: convert nilfs_segctor_prepare_write to use folios

Use the new folio APIs, saving 17 hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231114084436.2755-11-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6609e235
...@@ -1665,39 +1665,39 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) ...@@ -1665,39 +1665,39 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
return 0; return 0;
} }
/*
 * nilfs_begin_folio_io - put a folio under writeback for segment I/O
 * @folio: target folio, or NULL (treated as a no-op)
 *
 * For split b-tree node pages this function may be called twice on the
 * same folio; a folio already under writeback is left untouched, so
 * only the first call takes effect.
 */
static void nilfs_begin_folio_io(struct folio *folio)
{
	if (!folio)
		return;
	if (folio_test_writeback(folio))
		/* 2nd or later call for a split b-tree node page */
		return;

	folio_lock(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
}
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{ {
struct nilfs_segment_buffer *segbuf; struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL; struct folio *bd_folio = NULL, *fs_folio = NULL;
list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
struct buffer_head *bh; struct buffer_head *bh;
list_for_each_entry(bh, &segbuf->sb_segsum_buffers, list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) { b_assoc_buffers) {
if (bh->b_page != bd_page) { if (bh->b_folio != bd_folio) {
if (bd_page) { if (bd_folio) {
lock_page(bd_page); folio_lock(bd_folio);
clear_page_dirty_for_io(bd_page); folio_clear_dirty_for_io(bd_folio);
set_page_writeback(bd_page); folio_start_writeback(bd_folio);
unlock_page(bd_page); folio_unlock(bd_folio);
} }
bd_page = bh->b_page; bd_folio = bh->b_folio;
} }
} }
...@@ -1705,28 +1705,28 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) ...@@ -1705,28 +1705,28 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
b_assoc_buffers) { b_assoc_buffers) {
set_buffer_async_write(bh); set_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) { if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) { if (bh->b_folio != bd_folio) {
lock_page(bd_page); folio_lock(bd_folio);
clear_page_dirty_for_io(bd_page); folio_clear_dirty_for_io(bd_folio);
set_page_writeback(bd_page); folio_start_writeback(bd_folio);
unlock_page(bd_page); folio_unlock(bd_folio);
bd_page = bh->b_page; bd_folio = bh->b_folio;
} }
break; break;
} }
if (bh->b_page != fs_page) { if (bh->b_folio != fs_folio) {
nilfs_begin_page_io(fs_page); nilfs_begin_folio_io(fs_folio);
fs_page = bh->b_page; fs_folio = bh->b_folio;
} }
} }
} }
if (bd_page) { if (bd_folio) {
lock_page(bd_page); folio_lock(bd_folio);
clear_page_dirty_for_io(bd_page); folio_clear_dirty_for_io(bd_folio);
set_page_writeback(bd_page); folio_start_writeback(bd_folio);
unlock_page(bd_page); folio_unlock(bd_folio);
} }
nilfs_begin_page_io(fs_page); nilfs_begin_folio_io(fs_folio);
} }
static int nilfs_segctor_write(struct nilfs_sc_info *sci, static int nilfs_segctor_write(struct nilfs_sc_info *sci,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment