Commit f611ff63 authored by Matthew Wilcox

nilfs2: Convert to XArray

This is close to a 1:1 replacement of radix tree APIs with their XArray
equivalents.  It would be possible to optimise nilfs_copy_back_pages(),
but that doesn't seem to be in the performance path.  Also, I think
it has a pre-existing bug, and I've added a note to that effect in the
source code.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 04edf02c
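The conversion follows one pattern almost everywhere: the radix tree required radix_tree_preload()/radix_tree_preload_end() around a locked insert, while the XArray takes the GFP flags in the call itself, and the __xa_*() variants expect the caller to already hold the xa_lock. Below is a minimal, hypothetical sketch of that pattern, not part of this patch; the helper name is made up, and only the GFP_NOFS choice and the i_pages locking mirror the hunks that follow.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical example, not from this patch: insert @page at @index in
 * @mapping->i_pages.  No preload step is needed; __xa_insert() allocates
 * with the GFP flags it is given, and the __xa_*() calls assume the
 * caller holds the xa_lock, hence the xa_lock_irq()/xa_unlock_irq() pair.
 */
static int example_insert_page(struct address_space *mapping,
			       pgoff_t index, struct page *page)
{
	int err;

	xa_lock_irq(&mapping->i_pages);
	err = __xa_insert(&mapping->i_pages, index, page, GFP_NOFS);
	if (!err && PageDirty(page))
		__xa_set_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&mapping->i_pages);

	return err;	/* 0 on success, -EEXIST if the index was already in use */
}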
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -168,24 +168,18 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 	ctxt->newbh = NULL;
 
 	if (inode->i_blkbits == PAGE_SHIFT) {
-		lock_page(obh->b_page);
-		/*
-		 * We cannot call radix_tree_preload for the kernels older
-		 * than 2.6.23, because it is not exported for modules.
-		 */
+		struct page *opage = obh->b_page;
+		lock_page(opage);
 retry:
-		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
-		if (err)
-			goto failed_unlock;
 		/* BUG_ON(oldkey != obh->b_page->index); */
-		if (unlikely(oldkey != obh->b_page->index))
-			NILFS_PAGE_BUG(obh->b_page,
+		if (unlikely(oldkey != opage->index))
+			NILFS_PAGE_BUG(opage,
 				       "invalid oldkey %lld (newkey=%lld)",
 				       (unsigned long long)oldkey,
 				       (unsigned long long)newkey);
 
 		xa_lock_irq(&btnc->i_pages);
-		err = radix_tree_insert(&btnc->i_pages, newkey, obh->b_page);
+		err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);
 		xa_unlock_irq(&btnc->i_pages);
 		/*
 		 * Note: page->index will not change to newkey until
@@ -193,7 +187,6 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 		 * To protect the page in intermediate state, the page lock
 		 * is held.
 		 */
-		radix_tree_preload_end();
 		if (!err)
 			return 0;
 		else if (err != -EEXIST)
@@ -203,7 +196,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 		if (!err)
 			goto retry;
 		/* fallback to copy mode */
-		unlock_page(obh->b_page);
+		unlock_page(opage);
 	}
 
 	nbh = nilfs_btnode_create_block(btnc, newkey);
@@ -243,9 +236,8 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
 		mark_buffer_dirty(obh);
 
 		xa_lock_irq(&btnc->i_pages);
-		radix_tree_delete(&btnc->i_pages, oldkey);
-		radix_tree_tag_set(&btnc->i_pages, newkey,
-				   PAGECACHE_TAG_DIRTY);
+		__xa_erase(&btnc->i_pages, oldkey);
+		__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
 		xa_unlock_irq(&btnc->i_pages);
 
 		opage->index = obh->b_blocknr = newkey;
@@ -275,7 +267,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
 
 	if (nbh == NULL) {	/* blocksize == pagesize */
 		xa_lock_irq(&btnc->i_pages);
-		radix_tree_delete(&btnc->i_pages, newkey);
+		__xa_erase(&btnc->i_pages, newkey);
 		xa_unlock_irq(&btnc->i_pages);
 		unlock_page(ctxt->bh->b_page);
 	} else
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -289,7 +289,7 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
  * @dmap: destination page cache
  * @smap: source page cache
  *
- * No pages must no be added to the cache during this process.
+ * No pages must be added to the cache during this process.
  * This must be ensured by the caller.
  */
 void nilfs_copy_back_pages(struct address_space *dmap,
@@ -298,7 +298,6 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 	struct pagevec pvec;
 	unsigned int i, n;
 	pgoff_t index = 0;
-	int err;
 
 	pagevec_init(&pvec);
 repeat:
@@ -313,35 +312,34 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 		lock_page(page);
 		dpage = find_lock_page(dmap, offset);
 		if (dpage) {
-			/* override existing page on the destination cache */
+			/* overwrite existing page in the destination cache */
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
 			put_page(dpage);
+			/* Do we not need to remove page from smap here? */
 		} else {
-			struct page *page2;
+			struct page *p;
 
 			/* move the page to the destination cache */
 			xa_lock_irq(&smap->i_pages);
-			page2 = radix_tree_delete(&smap->i_pages, offset);
-			WARN_ON(page2 != page);
+			p = __xa_erase(&smap->i_pages, offset);
+			WARN_ON(page != p);
 			smap->nrpages--;
 			xa_unlock_irq(&smap->i_pages);
 			xa_lock_irq(&dmap->i_pages);
-			err = radix_tree_insert(&dmap->i_pages, offset, page);
-			if (unlikely(err < 0)) {
-				WARN_ON(err == -EEXIST);
+			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
+			if (unlikely(p)) {
+				/* Probably -ENOMEM */
 				page->mapping = NULL;
-				put_page(page); /* for cache */
+				put_page(page);
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
 				if (PageDirty(page))
-					radix_tree_tag_set(&dmap->i_pages,
-							   offset,
-							   PAGECACHE_TAG_DIRTY);
+					__xa_set_mark(&dmap->i_pages, offset,
+							PAGECACHE_TAG_DIRTY);
 			}
 			xa_unlock_irq(&dmap->i_pages);
 		}
@@ -467,8 +465,7 @@ int __nilfs_clear_page_dirty(struct page *page)
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->i_pages,
-					     page_index(page),
+			__xa_clear_mark(&mapping->i_pages, page_index(page),
 					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);
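A note on the two insertion primitives used above. nilfs_btnode_prepare_change_key() keeps an insert-style call, __xa_insert(), because a colliding key is a case it genuinely has to handle (the -EEXIST branch). nilfs_copy_back_pages() instead uses __xa_store(), which stores unconditionally and returns whatever was previously in the slot, or an xa_err()-encoded entry if allocation failed; since the caller guarantees no pages are added to the destination cache during the copy, any non-NULL return is unexpected, hence the new "Probably -ENOMEM" comment. Below is a hypothetical sketch of decoding a __xa_store() result, illustrative only and not code from this patch.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical example, not from this patch: store @page at @index and
 * decode the result.  __xa_store() returns the previous entry (NULL if
 * the slot was empty) or an xa_err()-encoded error on allocation failure.
 */
static int example_store_page(struct address_space *mapping,
			      pgoff_t index, struct page *page)
{
	void *old;

	xa_lock_irq(&mapping->i_pages);
	old = __xa_store(&mapping->i_pages, index, page, GFP_NOFS);
	xa_unlock_irq(&mapping->i_pages);

	if (xa_is_err(old))
		return xa_err(old);	/* most likely -ENOMEM */
	WARN_ON(old);			/* caller expected an empty slot */
	return 0;
}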