Commit 7f79d85b authored by Ritesh Harjani (IBM)'s avatar Ritesh Harjani (IBM)

iomap: Refactor iomap_write_delalloc_punch() function out

This patch factors iomap_write_delalloc_punch() function out. This function
is responsible for the actual punch-out operation.
The reason for doing this is, to avoid deep indentation when we bring
punch-out of individual non-dirty blocks within a dirty folio in a later
patch (which adds per-block dirty status handling to iomap) to avoid
delalloc block leak.
Signed-off-by: default avatarRitesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: default avatarDarrick J. Wong <djwong@kernel.org>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
parent 0af2b37d
...@@ -883,6 +883,33 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, ...@@ -883,6 +883,33 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
} }
EXPORT_SYMBOL_GPL(iomap_file_buffered_write); EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
/*
 * Punch out the delalloc range accumulated ahead of a dirty folio and
 * advance *punch_start_byte past the folio's data.
 *
 * A clean folio needs no punching, so we return immediately.  For a dirty
 * folio, everything between *punch_start_byte and the folio's start byte is
 * handed to the filesystem's punch callback, and the next punch start is
 * then moved to the end of this folio's data — clamped to end_byte so we
 * never step past the range being scanned.
 *
 * Returns 0 on success or the negative error code from the punch callback.
 */
static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	int error;

	/* Clean folio: no delalloc data here worth preserving or punching. */
	if (!folio_test_dirty(folio))
		return 0;

	/* Dirty folio: punch out everything accumulated before its start. */
	if (start_byte > *punch_start_byte) {
		error = punch(inode, *punch_start_byte,
				start_byte - *punch_start_byte);
		if (error)
			return error;
	}

	/*
	 * Bind the next punch start to the end of this data range, not the
	 * end of the folio, which may extend past end_byte.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
			folio_pos(folio) + folio_size(folio));
	return 0;
}
/* /*
* Scan the data range passed to us for dirty page cache folios. If we find a * Scan the data range passed to us for dirty page cache folios. If we find a
* dirty folio, punch out the preceding range and update the offset from which * dirty folio, punch out the preceding range and update the offset from which
...@@ -906,6 +933,7 @@ static int iomap_write_delalloc_scan(struct inode *inode, ...@@ -906,6 +933,7 @@ static int iomap_write_delalloc_scan(struct inode *inode,
{ {
while (start_byte < end_byte) { while (start_byte < end_byte) {
struct folio *folio; struct folio *folio;
int ret;
/* grab locked page */ /* grab locked page */
folio = filemap_lock_folio(inode->i_mapping, folio = filemap_lock_folio(inode->i_mapping,
...@@ -916,26 +944,12 @@ static int iomap_write_delalloc_scan(struct inode *inode, ...@@ -916,26 +944,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
continue; continue;
} }
/* if dirty, punch up to offset */ ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
if (folio_test_dirty(folio)) { start_byte, end_byte, punch);
if (start_byte > *punch_start_byte) { if (ret) {
int error; folio_unlock(folio);
folio_put(folio);
error = punch(inode, *punch_start_byte, return ret;
start_byte - *punch_start_byte);
if (error) {
folio_unlock(folio);
folio_put(folio);
return error;
}
}
/*
* Make sure the next punch start is correctly bound to
* the end of this data range, not the end of the folio.
*/
*punch_start_byte = min_t(loff_t, end_byte,
folio_pos(folio) + folio_size(folio));
} }
/* move offset to start of next folio in range */ /* move offset to start of next folio in range */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment