Commit 24a7b352 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

ntfs: convert ntfs_prepare_pages_for_non_resident_write() to folios

Convert each element of the pages array to a folio before using it.  This
in no way renders the function large-folio safe, but it does remove a lot
of hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231016201114.1928083-20-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a04eb7cb
...@@ -567,7 +567,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -567,7 +567,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
LCN lcn; LCN lcn;
s64 bh_pos, vcn_len, end, initialized_size; s64 bh_pos, vcn_len, end, initialized_size;
sector_t lcn_block; sector_t lcn_block;
struct page *page; struct folio *folio;
struct inode *vi; struct inode *vi;
ntfs_inode *ni, *base_ni = NULL; ntfs_inode *ni, *base_ni = NULL;
ntfs_volume *vol; ntfs_volume *vol;
...@@ -601,20 +601,6 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -601,20 +601,6 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
(long long)pos, bytes); (long long)pos, bytes);
blocksize = vol->sb->s_blocksize; blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits; blocksize_bits = vol->sb->s_blocksize_bits;
u = 0;
do {
page = pages[u];
BUG_ON(!page);
/*
* create_empty_buffers() will create uptodate/dirty buffers if
* the page is uptodate/dirty.
*/
if (!page_has_buffers(page)) {
create_empty_buffers(page, blocksize, 0);
if (unlikely(!page_has_buffers(page)))
return -ENOMEM;
}
} while (++u < nr_pages);
rl_write_locked = false; rl_write_locked = false;
rl = NULL; rl = NULL;
err = 0; err = 0;
...@@ -626,14 +612,21 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -626,14 +612,21 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
end = pos + bytes; end = pos + bytes;
cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits; cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
/* /*
* Loop over each page and for each page over each buffer. Use goto to * Loop over each buffer in each folio. Use goto to
* reduce indentation. * reduce indentation.
*/ */
u = 0; u = 0;
do_next_page: do_next_folio:
page = pages[u]; folio = page_folio(pages[u]);
bh_pos = (s64)page->index << PAGE_SHIFT; bh_pos = folio_pos(folio);
bh = head = page_buffers(page); head = folio_buffers(folio);
if (!head)
/*
* create_empty_buffers() will create uptodate/dirty
* buffers if the folio is uptodate/dirty.
*/
head = folio_create_empty_buffers(folio, blocksize, 0);
bh = head;
do { do {
VCN cdelta; VCN cdelta;
s64 bh_end; s64 bh_end;
...@@ -653,15 +646,15 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -653,15 +646,15 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (buffer_uptodate(bh)) if (buffer_uptodate(bh))
continue; continue;
/* /*
* The buffer is not uptodate. If the page is uptodate * The buffer is not uptodate. If the folio is uptodate
* set the buffer uptodate and otherwise ignore it. * set the buffer uptodate and otherwise ignore it.
*/ */
if (PageUptodate(page)) { if (folio_test_uptodate(folio)) {
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
continue; continue;
} }
/* /*
* Neither the page nor the buffer are uptodate. If * Neither the folio nor the buffer are uptodate. If
* the buffer is only partially being written to, we * the buffer is only partially being written to, we
* need to read it in before the write, i.e. now. * need to read it in before the write, i.e. now.
*/ */
...@@ -679,7 +672,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -679,7 +672,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
ntfs_submit_bh_for_read(bh); ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh; *wait_bh++ = bh;
} else { } else {
zero_user(page, bh_offset(bh), folio_zero_range(folio, bh_offset(bh),
blocksize); blocksize);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
...@@ -706,7 +699,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -706,7 +699,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
(bh_cofs >> blocksize_bits); (bh_cofs >> blocksize_bits);
set_buffer_mapped(bh); set_buffer_mapped(bh);
/* /*
* If the page is uptodate so is the buffer. If the * If the folio is uptodate so is the buffer. If the
* buffer is fully outside the write, we ignore it if * buffer is fully outside the write, we ignore it if
* it was already allocated and we mark it dirty so it * it was already allocated and we mark it dirty so it
* gets written out if we allocated it. On the other * gets written out if we allocated it. On the other
...@@ -714,7 +707,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -714,7 +707,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
* marking it dirty we set buffer_new so we can do * marking it dirty we set buffer_new so we can do
* error recovery. * error recovery.
*/ */
if (PageUptodate(page)) { if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh)) if (!buffer_uptodate(bh))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
if (unlikely(was_hole)) { if (unlikely(was_hole)) {
...@@ -754,7 +747,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -754,7 +747,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
ntfs_submit_bh_for_read(bh); ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh; *wait_bh++ = bh;
} else { } else {
zero_user(page, bh_offset(bh), folio_zero_range(folio,
bh_offset(bh),
blocksize); blocksize);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
...@@ -773,7 +767,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -773,7 +767,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
*/ */
if (bh_end <= pos || bh_pos >= end) { if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) { if (!buffer_uptodate(bh)) {
zero_user(page, bh_offset(bh), folio_zero_range(folio, bh_offset(bh),
blocksize); blocksize);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
...@@ -786,7 +780,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -786,7 +780,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
u8 *kaddr; u8 *kaddr;
unsigned pofs; unsigned pofs;
kaddr = kmap_atomic(page); kaddr = kmap_local_folio(folio, 0);
if (bh_pos < pos) { if (bh_pos < pos) {
pofs = bh_pos & ~PAGE_MASK; pofs = bh_pos & ~PAGE_MASK;
memset(kaddr + pofs, 0, pos - bh_pos); memset(kaddr + pofs, 0, pos - bh_pos);
...@@ -795,8 +789,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -795,8 +789,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
pofs = end & ~PAGE_MASK; pofs = end & ~PAGE_MASK;
memset(kaddr + pofs, 0, bh_end - end); memset(kaddr + pofs, 0, bh_end - end);
} }
kunmap_atomic(kaddr); kunmap_local(kaddr);
flush_dcache_page(page); flush_dcache_folio(folio);
} }
continue; continue;
} }
...@@ -809,11 +803,12 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -809,11 +803,12 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
initialized_size = ni->allocated_size; initialized_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags); read_unlock_irqrestore(&ni->size_lock, flags);
if (bh_pos > initialized_size) { if (bh_pos > initialized_size) {
if (PageUptodate(page)) { if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh)) if (!buffer_uptodate(bh))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) { } else if (!buffer_uptodate(bh)) {
zero_user(page, bh_offset(bh), blocksize); folio_zero_range(folio, bh_offset(bh),
blocksize);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
continue; continue;
...@@ -927,17 +922,17 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -927,17 +922,17 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
bh->b_blocknr = -1; bh->b_blocknr = -1;
/* /*
* If the buffer is uptodate we skip it. If it * If the buffer is uptodate we skip it. If it
* is not but the page is uptodate, we can set * is not but the folio is uptodate, we can set
* the buffer uptodate. If the page is not * the buffer uptodate. If the folio is not
* uptodate, we can clear the buffer and set it * uptodate, we can clear the buffer and set it
* uptodate. Whether this is worthwhile is * uptodate. Whether this is worthwhile is
* debatable and this could be removed. * debatable and this could be removed.
*/ */
if (PageUptodate(page)) { if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh)) if (!buffer_uptodate(bh))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) { } else if (!buffer_uptodate(bh)) {
zero_user(page, bh_offset(bh), folio_zero_range(folio, bh_offset(bh),
blocksize); blocksize);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
...@@ -1167,7 +1162,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -1167,7 +1162,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
} while (bh_pos += blocksize, (bh = bh->b_this_page) != head); } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
/* If there are no errors, do the next page. */ /* If there are no errors, do the next page. */
if (likely(!err && ++u < nr_pages)) if (likely(!err && ++u < nr_pages))
goto do_next_page; goto do_next_folio;
/* If there are no errors, release the runlist lock if we took it. */ /* If there are no errors, release the runlist lock if we took it. */
if (likely(!err)) { if (likely(!err)) {
if (unlikely(rl_write_locked)) { if (unlikely(rl_write_locked)) {
...@@ -1185,9 +1180,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -1185,9 +1180,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
bh = *--wait_bh; bh = *--wait_bh;
wait_on_buffer(bh); wait_on_buffer(bh);
if (likely(buffer_uptodate(bh))) { if (likely(buffer_uptodate(bh))) {
page = bh->b_page; folio = bh->b_folio;
bh_pos = ((s64)page->index << PAGE_SHIFT) + bh_pos = folio_pos(folio) + bh_offset(bh);
bh_offset(bh);
/* /*
* If the buffer overflows the initialized size, need * If the buffer overflows the initialized size, need
* to zero the overflowing region. * to zero the overflowing region.
...@@ -1197,7 +1191,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -1197,7 +1191,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (likely(bh_pos < initialized_size)) if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos; ofs = initialized_size - bh_pos;
zero_user_segment(page, bh_offset(bh) + ofs, folio_zero_segment(folio, bh_offset(bh) + ofs,
blocksize); blocksize);
} }
} else /* if (unlikely(!buffer_uptodate(bh))) */ } else /* if (unlikely(!buffer_uptodate(bh))) */
...@@ -1324,21 +1318,20 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -1324,21 +1318,20 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
u = 0; u = 0;
end = bh_cpos << vol->cluster_size_bits; end = bh_cpos << vol->cluster_size_bits;
do { do {
page = pages[u]; folio = page_folio(pages[u]);
bh = head = page_buffers(page); bh = head = folio_buffers(folio);
do { do {
if (u == nr_pages && if (u == nr_pages &&
((s64)page->index << PAGE_SHIFT) + folio_pos(folio) + bh_offset(bh) >= end)
bh_offset(bh) >= end)
break; break;
if (!buffer_new(bh)) if (!buffer_new(bh))
continue; continue;
clear_buffer_new(bh); clear_buffer_new(bh);
if (!buffer_uptodate(bh)) { if (!buffer_uptodate(bh)) {
if (PageUptodate(page)) if (folio_test_uptodate(folio))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
else { else {
zero_user(page, bh_offset(bh), folio_zero_range(folio, bh_offset(bh),
blocksize); blocksize);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment