Commit 414ae0a4 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

ocfs2: convert ocfs2_map_page_blocks to use a folio

Convert the page argument to a folio and then use the folio APIs
throughout.  Replaces three hidden calls to compound_head() with one
explicit one.

Link: https://lkml.kernel.org/r/20231016201114.1928083-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c3f4200a
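
For readers skimming the diff below, here is a minimal illustrative sketch (not part of the commit) of the buffer-head setup pattern being converted. The page-based helpers page_has_buffers(), create_empty_buffers() and page_buffers() each hide a call to compound_head(); the folio variants work on the folio obtained once with page_folio(). Variable names (page, bsize, head) mirror ocfs2_map_page_blocks().

	/* Old pattern: three helpers, each with a hidden compound_head() call. */
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);
	head = page_buffers(page);

	/*
	 * New pattern: one explicit conversion, then folio-native helpers.
	 * folio_create_empty_buffers() also returns the buffer list, so the
	 * separate page_buffers() lookup goes away.
	 */
	struct folio *folio = page_folio(page);

	head = folio_buffers(folio);
	if (!head)
		head = folio_create_empty_buffers(folio, bsize, 0);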
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -568,10 +568,10 @@ static void ocfs2_clear_page_regions(struct page *page,
  * read-in the blocks at the tail of our file. Avoid reading them by
  * testing i_size against each block offset.
  */
-static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
+static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
 				 unsigned int block_start)
 {
-	u64 offset = page_offset(page) + block_start;
+	u64 offset = folio_pos(folio) + block_start;
 
 	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
 		return 1;
@@ -593,15 +593,16 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 			  struct inode *inode, unsigned int from,
 			  unsigned int to, int new)
 {
+	struct folio *folio = page_folio(page);
 	int ret = 0;
 	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
 	unsigned int block_end, block_start;
 	unsigned int bsize = i_blocksize(inode);
 
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, bsize, 0);
+	head = folio_buffers(folio);
+	if (!head)
+		head = folio_create_empty_buffers(folio, bsize, 0);
 
-	head = page_buffers(page);
 	for (bh = head, block_start = 0; bh != head || !block_start;
 	     bh = bh->b_this_page, block_start += bsize) {
 		block_end = block_start + bsize;
@@ -613,7 +614,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 		 * they may belong to unallocated clusters.
 		 */
 		if (block_start >= to || block_end <= from) {
-			if (PageUptodate(page))
+			if (folio_test_uptodate(folio))
 				set_buffer_uptodate(bh);
 			continue;
 		}
@@ -630,11 +631,11 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 				clean_bdev_bh_alias(bh);
 			}
 
-		if (PageUptodate(page)) {
+		if (folio_test_uptodate(folio)) {
 			set_buffer_uptodate(bh);
 		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 			   !buffer_new(bh) &&
-			   ocfs2_should_read_blk(inode, page, block_start) &&
+			   ocfs2_should_read_blk(inode, folio, block_start) &&
 			   (block_start < from || block_end > to)) {
 			bh_read_nowait(bh, 0);
 			*wait_bh++=bh;
@@ -668,7 +669,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 		if (block_start >= to)
 			break;
 
-		zero_user(page, block_start, bh->b_size);
+		folio_zero_range(folio, block_start, bh->b_size);
 		set_buffer_uptodate(bh);
 		mark_buffer_dirty(bh);