Commit 4e96b2db authored by Allison Henderson, committed by Theodore Ts'o

ext4: Add new ext4_discard_partial_page_buffers routines

This patch adds two new routines: ext4_discard_partial_page_buffers
and ext4_discard_partial_page_buffers_no_lock.

The ext4_discard_partial_page_buffers routine is a wrapper
function to ext4_discard_partial_page_buffers_no_lock.
The wrapper function locks the page and passes it to
ext4_discard_partial_page_buffers_no_lock.
Calling functions that already have the page locked can call
ext4_discard_partial_page_buffers_no_lock directly.
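
As a rough illustration (this sketch is not part of the patch), a caller
might pick between the two entry points as follows; the helper name
example_discard and its locked_page argument are hypothetical stand-ins
for whatever state the caller already has:

static int example_discard(handle_t *handle, struct inode *inode,
			   struct page *locked_page, loff_t from, loff_t len)
{
	/* The caller already holds the page lock: use the _no_lock variant */
	if (locked_page)
		return ext4_discard_partial_page_buffers_no_lock(handle, inode,
				locked_page, from, len, 0);

	/* Otherwise let the wrapper find and lock the page containing 'from' */
	return ext4_discard_partial_page_buffers(handle, inode->i_mapping,
						 from, len, 0);
}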

The ext4_discard_partial_page_buffers_no_lock function
zeros a specified range in a page, and unmaps the
corresponding buffer heads.  Only block aligned regions of the
page will have their buffer heads unmapped.  Regions that are not
block aligned will be mapped if needed so that they can be updated
with the partial zero out.  This function is meant to be used to
zero part of a page and unmap its buffer heads when the
corresponding blocks have been released or will be released.

This routine is used in the following scenarios:
* A hole is punched and the non page aligned regions
  of the head and tail of the hole need to be discarded

* The file is truncated and the partial page beyond EOF needs
  to be discarded

* The end of a hole is in the same page as EOF.  After the
  page is flushed, the partial page beyond EOF needs to be
  discarded.

* A write operation begins or ends inside a hole and the partial
  page appearing before or after the write needs to be discarded

* A write operation extends EOF and the partial page beyond EOF
  needs to be discarded

This function takes a flag EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
which is used when a write operation begins or ends in a hole.
When the EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED flag is used, only
buffer heads that are already unmapped will have the corresponding
regions of the page zeroed.
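
For illustration only (this sketch is not from the patch), a write that
begins at offset 'pos' inside a hole could use the flag to zero the part
of the page in front of the write that is still backed by unmapped buffer
heads; the helper example_zero_hole_head and its arguments are hypothetical:

static int example_zero_hole_head(handle_t *handle, struct inode *inode,
				  loff_t pos)
{
	loff_t page_start = pos & ~((loff_t)PAGE_CACHE_SIZE - 1);

	/* Nothing in this page precedes the write */
	if (pos == page_start)
		return 0;

	/*
	 * With EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED, only regions whose
	 * buffer heads are still unmapped are zeroed; mapped regions are
	 * left untouched.
	 */
	return ext4_discard_partial_page_buffers(handle, inode->i_mapping,
			page_start, pos - page_start,
			EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
}
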
Signed-off-by: Allison Henderson <achender@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 5930ea64
...
@@ -528,6 +528,11 @@ struct ext4_new_group_data {
#define EXT4_FREE_BLOCKS_VALIDATED 0x0004
#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
/*
* Flags used by ext4_discard_partial_page_buffers
*/
#define EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 0x0001
/*
* ioctl commands
*/
...
@@ -1838,6 +1843,12 @@ extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length);
extern int ext4_discard_partial_page_buffers(handle_t *handle,
struct address_space *mapping, loff_t from,
loff_t length, int flags);
extern int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
struct inode *inode, struct page *page, loff_t from,
loff_t length, int flags);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
...
...
@@ -2966,6 +2966,230 @@ void ext4_set_aops(struct inode *inode)
inode->i_mapping->a_ops = &ext4_journalled_aops;
}
/*
* ext4_discard_partial_page_buffers()
* Wrapper function for ext4_discard_partial_page_buffers_no_lock.
* This function finds and locks the page containing the offset
* "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
* Calling functions that already have the page locked should call
* ext4_discard_partial_page_buffers_no_lock directly.
*/
int ext4_discard_partial_page_buffers(handle_t *handle,
struct address_space *mapping, loff_t from,
loff_t length, int flags)
{
struct inode *inode = mapping->host;
struct page *page;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
mapping_gfp_mask(mapping) & ~__GFP_FS);
if (!page)
return -EINVAL;
err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
from, length, flags);
unlock_page(page);
page_cache_release(page);
return err;
}
/*
* ext4_discard_partial_page_buffers_no_lock()
* Zeros a page range of length 'length' starting from offset 'from'.
* Buffer heads that correspond to the block aligned regions of the
* zeroed range will be unmapped.  Regions that are not block aligned
* will have the corresponding buffer head mapped if needed so that
* the region of the page can be updated with the partial zero out.
*
* This function assumes that the page has already been locked.  The
* range to be discarded must be contained within the given page.
* If the specified range exceeds the end of the page it will be shortened
* to the end of the page that corresponds to 'from'.  This function is
* appropriate for updating a page and its buffer heads to be unmapped and
* zeroed for blocks that have been either released, or are going to be
* released.
*
* handle: The journal handle
inode: The file's inode
* page: A locked page that contains the offset "from"
from: The starting byte offset (from the beginning of the file)
* to begin discarding
length: The number of bytes to discard
* flags: Optional flags that may be used:
*
* EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
* Only zero the regions of the page whose buffer heads
* have already been unmapped. This flag is appropriate
* for updating the contents of a page whose blocks may
* have already been released, and we only want to zero
* out the regions that correspond to those released blocks.
*
* Returns zero on success or negative on failure.
*/
int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
struct inode *inode, struct page *page, loff_t from,
loff_t length, int flags)
{
ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
unsigned int offset = from & (PAGE_CACHE_SIZE-1);
unsigned int blocksize, max, pos;
unsigned int end_of_block, range_to_discard;
ext4_lblk_t iblock;
struct buffer_head *bh;
int err = 0;
blocksize = inode->i_sb->s_blocksize;
max = PAGE_CACHE_SIZE - offset;
if (index != page->index)
return -EINVAL;
/*
* correct length if it does not fall between
* 'from' and the end of the page
*/
if (length > max || length < 0)
length = max;
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page)) {
/*
* If the range to be discarded covers a partial block
* we need to get the page buffers. This is because
* partial blocks cannot be released and the page needs
* to be updated with the contents of the block before
* we write the zeros on top of it.
*/
if (!(from & (blocksize - 1)) ||
!((from + length) & (blocksize - 1))) {
create_empty_buffers(page, blocksize, 0);
} else {
/*
* If there are no partial blocks,
* there is nothing to update,
* so we can return now
*/
return 0;
}
}
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
iblock++;
pos += blocksize;
}
pos = offset;
while (pos < offset + length) {
err = 0;
/* The length of space left to zero and unmap */
range_to_discard = offset + length - pos;
/* The length of space until the end of the block */
end_of_block = blocksize - (pos & (blocksize-1));
/*
* Do not unmap or zero past end of block
* for this buffer head
*/
if (range_to_discard > end_of_block)
range_to_discard = end_of_block;
/*
* Skip this buffer head if we are only zeroing unmapped
* regions of the page
*/
if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
buffer_mapped(bh))
goto next;
/* If the range is block aligned, unmap */
if (range_to_discard == blocksize) {
clear_buffer_dirty(bh);
bh->b_bdev = NULL;
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
clear_buffer_uptodate(bh);
zero_user(page, pos, range_to_discard);
BUFFER_TRACE(bh, "Buffer discarded");
goto next;
}
/*
* If this block is not completely contained in the range
* to be discarded, then it is not going to be released. Because
* we need to keep this block, we need to make sure this part
* of the page is uptodate before we modify it by writing
* partial zeros on it.
*/
if (!buffer_mapped(bh)) {
/*
* Buffer head must be mapped before we can read
* from the block
*/
BUFFER_TRACE(bh, "unmapped");
ext4_get_block(inode, iblock, bh, 0);
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "still unmapped");
goto next;
}
}
/* Ok, it's mapped. Make sure it's up-to-date */
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt.*/
if (!buffer_uptodate(bh))
goto next;
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
goto next;
}
zero_user(page, pos, range_to_discard);
err = 0;
if (ext4_should_journal_data(inode)) {
err = ext4_handle_dirty_metadata(handle, inode, bh);
} else {
if (ext4_should_order_data(inode) &&
EXT4_I(inode)->jinode)
err = ext4_jbd2_file_inode(handle, inode);
mark_buffer_dirty(bh);
}
BUFFER_TRACE(bh, "Partial buffer zeroed");
next:
bh = bh->b_this_page;
iblock++;
pos += range_to_discard;
}
return err;
}
/*
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
* up to the end of the block which corresponds to `from'.
...