Commit c1901cd3 authored by Matthew Wilcox

page cache: Convert find_get_entries_tag to XArray

Slightly shorter and simpler code.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent a6906972
...@@ -373,7 +373,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping, ...@@ -373,7 +373,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
nr_pages, pages); nr_pages, pages);
} }
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
int tag, unsigned int nr_entries, xa_mark_t tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices); struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping, struct page *grab_cache_page_write_begin(struct address_space *mapping,
......
...@@ -1866,57 +1866,51 @@ EXPORT_SYMBOL(find_get_pages_range_tag); ...@@ -1866,57 +1866,51 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
* @tag. * @tag.
*/ */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			xa_mark_t tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices)
{
	/* Walk the page cache xarray from @start, visiting marked entries. */
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned int ret = 0;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	xas_for_each_marked(&xas, page, ULONG_MAX, tag) {
		struct page *head;

		/* Skip transient xarray states (e.g. a node being split). */
		if (xas_retry(&xas, page))
			continue;
		/*
		 * A shadow entry of a recently evicted page, a swap
		 * entry from shmem/tmpfs or a DAX entry.  Return it
		 * without attempting to raise page count.
		 */
		if (xa_is_value(page))
			goto export;

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto retry;

		/* The page was split under us? */
		if (compound_head(page) != head)
			goto put_page;

		/* Has the page moved? */
		if (unlikely(page != xas_reload(&xas)))
			goto put_page;

export:
		indices[ret] = xas.xa_index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
		continue;
put_page:
		put_page(head);
retry:
		/* Drop our reference attempt and re-walk this index. */
		xas_reset(&xas);
	}
	rcu_read_unlock();
	return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment