Commit b3cfa35a authored by Christoph Hellwig, committed by Chris Mason

Btrfs: factor page private preparations into a helper
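
The same page-private preparation was open-coded in six places: check
PagePrivate(), set it, set page->private to 1 as a marker, warn if the
mapping has no ->invalidatepage operation, and take an extra reference
on the page with page_cache_get().  Hoist that block into a new helper,
set_page_extent_mapped(), declared alongside the other extent_map
prototypes, and call the helper from extent_read_full_page,
extent_write_full_page, extent_commit_write, extent_prepare_write,
prepare_pages and btrfs_cow_one_page.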

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 8e1cd766
@@ -1379,6 +1379,16 @@ static int submit_extent_page(int rw, struct extent_map_tree *tree,
 	return ret;
 }
 
+void set_page_extent_mapped(struct page *page)
+{
+	if (!PagePrivate(page)) {
+		SetPagePrivate(page);
+		WARN_ON(!page->mapping->a_ops->invalidatepage);
+		set_page_private(page, 1);
+		page_cache_get(page);
+	}
+}
+
 /*
  * basic readpage implementation. Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
@@ -1405,12 +1415,7 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
 	size_t iosize;
 	size_t blocksize = inode->i_sb->s_blocksize;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		set_page_private(page, 1);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
 
 	end = page_end;
 	lock_extent(tree, start, end, GFP_NOFS);
@@ -1531,12 +1536,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 		       PAGE_CACHE_SIZE - offset, KM_USER0);
 	}
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
 
 	lock_extent(tree, start, page_end, GFP_NOFS);
 	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
@@ -1672,13 +1672,7 @@ int extent_commit_write(struct extent_map_tree *tree,
 {
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
-
+	set_page_extent_mapped(page);
 	set_page_dirty(page);
 	if (pos > inode->i_size) {
@@ -1709,12 +1703,8 @@ int extent_prepare_write(struct extent_map_tree *tree,
 	int ret = 0;
 	int isnew;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
+
 	block_start = (page_start + from) & ~((u64)blocksize - 1);
 	block_end = (page_start + to - 1) | (blocksize - 1);
 	orig_block_start = block_start;
...
@@ -105,4 +105,5 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end);
 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private);
 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private);
+void set_page_extent_mapped(struct page *page);
 #endif
...
@@ -543,12 +543,7 @@ static int prepare_pages(struct btrfs_root *root,
 		}
 		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
 		wait_on_page_writeback(pages[i]);
-		if (!PagePrivate(pages[i])) {
-			SetPagePrivate(pages[i]);
-			set_page_private(pages[i], 1);
-			WARN_ON(!pages[i]->mapping->a_ops->invalidatepage);
-			page_cache_get(pages[i]);
-		}
+		set_page_extent_mapped(pages[i]);
 		WARN_ON(!PageLocked(pages[i]));
 	}
 	return 0;
...
@@ -652,12 +652,7 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
 	u64 page_start = page->index << PAGE_CACHE_SHIFT;
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
 
 	lock_extent(em_tree, page_start, page_end, GFP_NOFS);
 	set_extent_delalloc(&BTRFS_I(inode)->extent_tree, page_start,
...
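
For illustration (not part of the commit), here is a minimal sketch of
how a new call site would use the helper, following the pattern every
hunk above shows: mark the page as extent-mapped, then lock the extent
range.  The surrounding function and its arguments are hypothetical,
and unlock_extent() is assumed to take the same tree/start/end/mask
arguments as the lock_extent() calls in the diff.

static int example_page_op(struct extent_map_tree *tree, struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	/*
	 * Safe to call on whichever path sees the page first: the
	 * PagePrivate() test inside the helper makes repeated calls
	 * harmless, so no extra page reference is taken and
	 * page->private is not reset.
	 */
	set_page_extent_mapped(page);

	lock_extent(tree, start, end, GFP_NOFS);
	/* ... read, dirty, or COW the page here ... */
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}

The page_cache_get() inside the helper pins the page while
page->private is set; that reference is presumably dropped on the
invalidatepage/releasepage side, which would explain why the helper
warns when the mapping has no ->invalidatepage operation.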