Commit 6609e235 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

nilfs2: convert to __nilfs_clear_folio_dirty()

All callers now have a folio, so convert to pass a folio.  No caller uses
the return value, so make it return void.  Removes a couple of hidden
calls to compound_head().

Link: https://lkml.kernel.org/r/20231114084436.2755-10-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5d3b5903
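Why the conversion removes hidden compound_head() calls: a page-based flag helper such as PageDirty() must first resolve a possibly-tail page to its head page before testing the flag, while a folio is by definition never a tail page, so folio_test_dirty() reads the flag directly. The following is a minimal userspace sketch of that difference; the struct layouts, the head-pointer encoding, and the compound_head() logic are simplified stand-ins for the real kernel definitions, not buildable kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PG_dirty 4	/* arbitrary bit number, for this sketch only */

struct page {
	unsigned long flags;
	struct page *head;	/* stand-in for the real compound-page encoding */
};

struct folio {
	unsigned long flags;	/* a folio is always a head page */
};

/* Every page-based flag helper pays for this lookup. */
static struct page *compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

static bool PageDirty(struct page *page)
{
	return compound_head(page)->flags & (1UL << PG_dirty);
}

/* The folio helper tests the flag with no head-page lookup. */
static bool folio_test_dirty(struct folio *folio)
{
	return folio->flags & (1UL << PG_dirty);
}

int main(void)
{
	struct page head = { .flags = 1UL << PG_dirty, .head = NULL };
	struct page tail = { .flags = 0, .head = &head };

	/* The tail page is dirty only via its head; the folio view is direct
	 * (in the kernel, struct folio overlays the head page, modelled here
	 * by the cast). */
	printf("PageDirty(tail) = %d\n", PageDirty(&tail));
	printf("folio_test_dirty(head) = %d\n",
	       folio_test_dirty((struct folio *)&head));
	return 0;
}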
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -82,7 +82,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
 	lock_buffer(bh);
 	set_mask_bits(&bh->b_state, clear_bits, 0);
 	if (nilfs_folio_buffers_clean(folio))
-		__nilfs_clear_page_dirty(&folio->page);
+		__nilfs_clear_folio_dirty(folio);
 
 	bh->b_blocknr = -1;
 	folio_clear_uptodate(folio);
@@ -428,7 +428,7 @@ void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
 		} while (bh = bh->b_this_page, bh != head);
 	}
 
-	__nilfs_clear_page_dirty(&folio->page);
+	__nilfs_clear_folio_dirty(folio);
 }
 
 unsigned int nilfs_page_count_clean_buffers(struct page *page,
@@ -458,22 +458,23 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
  */
-int __nilfs_clear_page_dirty(struct page *page)
+void __nilfs_clear_folio_dirty(struct folio *folio)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
-		if (test_bit(PG_dirty, &page->flags)) {
-			__xa_clear_mark(&mapping->i_pages, page_index(page),
+		if (folio_test_dirty(folio)) {
+			__xa_clear_mark(&mapping->i_pages, folio->index,
 					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
-			return clear_page_dirty_for_io(page);
+			folio_clear_dirty_for_io(folio);
+			return;
 		}
 		xa_unlock_irq(&mapping->i_pages);
-		return 0;
+		return;
 	}
-	return TestClearPageDirty(page);
+	folio_clear_dirty(folio);
 }
 
 /**
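A note on the rewritten function above: the dirty state lives in two places, the PAGECACHE_TAG_DIRTY mark in the mapping's i_pages xarray (what writeback scans to find dirty folios) and the dirty flag on the folio itself, so both must be cancelled. The userspace model below mirrors that control flow under loudly simplified assumptions: every type and helper is an illustrative stand-in (a plain array instead of an xarray, no xa_lock_irq(), and a bare flag clear where the kernel calls folio_clear_dirty_for_io(), which also handles writeback accounting).

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 16
#define DIRTY 0x1UL

/* Stand-in for struct address_space: only the dirty tags survive. */
struct mapping {
	bool tag_dirty[NR_SLOTS];	/* models the xarray's PAGECACHE_TAG_DIRTY */
};

struct folio {
	unsigned long flags;
	unsigned long index;
	struct mapping *mapping;
};

/* Mirrors the diff's control flow: clear the tree mark, then the flag. */
static void clear_folio_dirty(struct folio *folio)
{
	struct mapping *mapping = folio->mapping;

	if (mapping) {
		/* the kernel holds xa_lock_irq(&mapping->i_pages) here */
		if (folio->flags & DIRTY) {
			mapping->tag_dirty[folio->index] = false;
			folio->flags &= ~DIRTY;	/* ~ folio_clear_dirty_for_io() */
			return;
		}
		return;			/* nothing to cancel if the folio isn't dirty */
	}
	folio->flags &= ~DIRTY;		/* no mapping: only the folio flag exists */
}

int main(void)
{
	struct mapping m = { .tag_dirty = { [3] = true } };
	struct folio f = { .flags = DIRTY, .index = 3, .mapping = &m };

	clear_folio_dirty(&f);
	printf("tag_dirty=%d folio_dirty=%lu\n",
	       m.tag_dirty[3], f.flags & DIRTY);	/* both now clear */
	return 0;
}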
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -30,7 +30,7 @@ BUFFER_FNS(NILFS_Checked, nilfs_checked)	/* buffer is verified */
 BUFFER_FNS(NILFS_Redirected, nilfs_redirected)	/* redirected to a copy */
 
-int __nilfs_clear_page_dirty(struct page *);
+void __nilfs_clear_folio_dirty(struct folio *);
 
 struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,
 				      unsigned long, unsigned long);
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1760,7 +1760,7 @@ static void nilfs_end_folio_io(struct folio *folio, int err)
 		 */
 		folio_lock(folio);
 		if (nilfs_folio_buffers_clean(folio))
-			__nilfs_clear_page_dirty(&folio->page);
+			__nilfs_clear_folio_dirty(folio);
 		folio_unlock(folio);
 	}
 	return;
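The call sites in this commit share one invariant: folio-level dirty state is cancelled only after nilfs_folio_buffers_clean() confirms that no buffer attached to the folio is still dirty. Below is a hedged sketch of that guard as it appears in nilfs_end_folio_io() above; the wrapper name nilfs_cancel_dirty_if_clean() is hypothetical, and the snippet is kernel-style illustration rather than a standalone buildable unit.

/* Hypothetical helper condensing the caller pattern from this diff. */
static void nilfs_cancel_dirty_if_clean(struct folio *folio)
{
	folio_lock(folio);
	if (nilfs_folio_buffers_clean(folio))	/* all attached buffers clean? */
		__nilfs_clear_folio_dirty(folio);	/* then drop folio dirty state */
	folio_unlock(folio);
}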