hfs: Convert to release_folio

Use a folio throughout hfs_release_folio().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent e45c20d1
...@@ -69,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block) ...@@ -69,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfs_get_block); return generic_block_bmap(mapping, block, hfs_get_block);
} }
static int hfs_releasepage(struct page *page, gfp_t mask) static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
struct hfs_btree *tree; struct hfs_btree *tree;
struct hfs_bnode *node; struct hfs_bnode *node;
u32 nidx; u32 nidx;
int i, res = 1; int i;
bool res = true;
switch (inode->i_ino) { switch (inode->i_ino) {
case HFS_EXT_CNID: case HFS_EXT_CNID:
...@@ -87,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask) ...@@ -87,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
break; break;
default: default:
BUG(); BUG();
return 0; return false;
} }
if (!tree) if (!tree)
return 0; return false;
if (tree->node_size >= PAGE_SIZE) { if (tree->node_size >= PAGE_SIZE) {
nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT); nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock); spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx); node = hfs_bnode_findhash(tree, nidx);
if (!node) if (!node)
; ;
else if (atomic_read(&node->refcnt)) else if (atomic_read(&node->refcnt))
res = 0; res = false;
if (res && node) { if (res && node) {
hfs_bnode_unhash(node); hfs_bnode_unhash(node);
hfs_bnode_free(node); hfs_bnode_free(node);
} }
spin_unlock(&tree->hash_lock); spin_unlock(&tree->hash_lock);
} else { } else {
nidx = page->index << (PAGE_SHIFT - tree->node_size_shift); nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift); i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock); spin_lock(&tree->hash_lock);
do { do {
...@@ -115,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask) ...@@ -115,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
if (!node) if (!node)
continue; continue;
if (atomic_read(&node->refcnt)) { if (atomic_read(&node->refcnt)) {
res = 0; res = false;
break; break;
} }
hfs_bnode_unhash(node); hfs_bnode_unhash(node);
...@@ -123,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask) ...@@ -123,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count); } while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock); spin_unlock(&tree->hash_lock);
} }
return res ? try_to_free_buffers(page) : 0; return res ? try_to_free_buffers(&folio->page) : false;
} }
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
...@@ -165,7 +166,7 @@ const struct address_space_operations hfs_btree_aops = { ...@@ -165,7 +166,7 @@ const struct address_space_operations hfs_btree_aops = {
.write_begin = hfs_write_begin, .write_begin = hfs_write_begin,
.write_end = generic_write_end, .write_end = generic_write_end,
.bmap = hfs_bmap, .bmap = hfs_bmap,
.releasepage = hfs_releasepage, .release_folio = hfs_release_folio,
}; };
const struct address_space_operations hfs_aops = { const struct address_space_operations hfs_aops = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment