Commit 8c6cb3e3 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

buffer: convert __block_commit_write() to take a folio

This removes a hidden call to compound_head() inside
__block_commit_write() and moves it to those callers which are still page
based.  Also make block_write_end() safe for large folios.

Link: https://lkml.kernel.org/r/20230612210141.730128-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fe181377
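
The second half of the commit message, making block_write_end() safe for large folios, comes down to how the in-folio offset is computed: the old code masked pos with PAGE_SIZE - 1, which only ever yields an offset within a single page, while the new code subtracts folio_pos(folio), the folio's starting offset in the file. Below is a minimal userspace sketch of that arithmetic; the 4 KiB page size, the 16-page folio, and the 40 KiB write position are illustrative assumptions, not kernel code or values taken from this commit.

/* Toy model of the offset calculation in block_write_end(); builds with any C compiler. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        /* Assume one large folio mapping file offsets [0, 64 KiB). */
        unsigned long folio_pos = 0;
        unsigned long folio_size = 16 * PAGE_SIZE;

        /* A write that starts 40 KiB into the file, i.e. in the folio's 11th page. */
        unsigned long pos = 10 * PAGE_SIZE;

        unsigned long old_start = pos & (PAGE_SIZE - 1);  /* page-based math */
        unsigned long new_start = pos - folio_pos;         /* folio-based math */

        printf("old start = %lu, new start = %lu\n", old_start, new_start);

        /*
         * The masked value wraps back to 0, so the commit step would be
         * pointed at the wrong buffers; the folio-relative value stays
         * inside the folio and identifies the right range.
         */
        assert(old_start == 0);
        assert(new_start == 10 * PAGE_SIZE && new_start < folio_size);
        return 0;
}

For a single-page folio the two expressions agree, which is why the old calculation was correct before large folios; the hunk in block_write_end() below switches to the folio-relative form.
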
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2116,15 +2116,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 }
 EXPORT_SYMBOL(__block_write_begin);
 
-static int __block_commit_write(struct inode *inode, struct page *page,
-                unsigned from, unsigned to)
+static int __block_commit_write(struct inode *inode, struct folio *folio,
+                size_t from, size_t to)
 {
-        unsigned block_start, block_end;
-        int partial = 0;
+        size_t block_start, block_end;
+        bool partial = false;
         unsigned blocksize;
         struct buffer_head *bh, *head;
 
-        bh = head = page_buffers(page);
+        bh = head = folio_buffers(folio);
         blocksize = bh->b_size;
 
         block_start = 0;
@@ -2132,7 +2132,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
                 block_end = block_start + blocksize;
                 if (block_end <= from || block_start >= to) {
                         if (!buffer_uptodate(bh))
-                                partial = 1;
+                                partial = true;
                 } else {
                         set_buffer_uptodate(bh);
                         mark_buffer_dirty(bh);
@@ -2147,11 +2147,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
         /*
          * If this is a partial write which happened to make all buffers
          * uptodate then we can optimize away a bogus read_folio() for
-         * the next read(). Here we 'discover' whether the page went
+         * the next read(). Here we 'discover' whether the folio went
          * uptodate as a result of this (potentially partial) write.
          */
         if (!partial)
-                SetPageUptodate(page);
+                folio_mark_uptodate(folio);
         return 0;
 }
 
@@ -2188,10 +2188,9 @@ int block_write_end(struct file *file, struct address_space *mapping,
                 loff_t pos, unsigned len, unsigned copied,
                 struct page *page, void *fsdata)
 {
+        struct folio *folio = page_folio(page);
         struct inode *inode = mapping->host;
-        unsigned start;
-
-        start = pos & (PAGE_SIZE - 1);
+        size_t start = pos - folio_pos(folio);
 
         if (unlikely(copied < len)) {
                 /*
@@ -2203,18 +2202,18 @@ int block_write_end(struct file *file, struct address_space *mapping,
                  * read_folio might come in and destroy our partial write.
                  *
                  * Do the simplest thing, and just treat any short write to a
-                 * non uptodate page as a zero-length write, and force the
+                 * non uptodate folio as a zero-length write, and force the
                  * caller to redo the whole thing.
                  */
-                if (!PageUptodate(page))
+                if (!folio_test_uptodate(folio))
                         copied = 0;
 
-                page_zero_new_buffers(page, start+copied, start+len);
+                page_zero_new_buffers(&folio->page, start+copied, start+len);
         }
-        flush_dcache_page(page);
+        flush_dcache_folio(folio);
 
         /* This could be a short (even 0-length) commit */
-        __block_commit_write(inode, page, start, start+copied);
+        __block_commit_write(inode, folio, start, start + copied);
 
         return copied;
 }
@@ -2537,8 +2536,9 @@ EXPORT_SYMBOL(cont_write_begin);
 
 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
-        struct inode *inode = page->mapping->host;
-        __block_commit_write(inode,page,from,to);
+        struct folio *folio = page_folio(page);
+        struct inode *inode = folio->mapping->host;
+        __block_commit_write(inode, folio, from, to);
         return 0;
 }
 EXPORT_SYMBOL(block_commit_write);
@@ -2586,7 +2586,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
         ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
         if (!ret)
-                ret = block_commit_write(&folio->page, 0, end);
+                ret = __block_commit_write(inode, folio, 0, end);
 
         if (unlikely(ret < 0))
                 goto out_unlock;