Commit 552446a4 authored by Matthew Wilcox

shmem: Convert shmem_add_to_page_cache to XArray

We can use xas_find_conflict() instead of radix_tree_gang_lookup_slot()
to find any conflicting entry and combine the three paths through this
function into one.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent e21a2955
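
Note: the conversion relies on the XArray's locked store-with-retry idiom: take the xa_lock, check for a conflicting entry with xas_find_conflict(), store, and let xas_nomem() allocate nodes and retry if the store ran out of memory. A minimal sketch of that idiom follows; store_unless_conflict() is a hypothetical single-entry helper, not part of this commit, and it omits the multi-order (THP) handling the real function does with XA_STATE_ORDER() and xas_create_range().

/*
 * Illustrative only -- not part of this commit.  Shows the
 * xas_find_conflict()/xas_nomem() pattern for a single entry.
 */
#include <linux/xarray.h>

static int store_unless_conflict(struct xarray *xa, unsigned long index,
				 void *item, void *expected, gfp_t gfp)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		/* Any entry other than @expected at @index is a conflict. */
		if (xas_find_conflict(&xas) != expected)
			xas_set_err(&xas, -EEXIST);
		else
			xas_store(&xas, item);
		xas_unlock_irq(&xas);
		/* On -ENOMEM, allocate nodes outside the lock and retry. */
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}

The real shmem_add_to_page_cache() in the diff below follows the same loop, but stores all nr subpages of a possibly-huge page and keeps the shmem statistics updates inside it.
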
@@ -577,9 +577,11 @@ static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
-				   pgoff_t index, void *expected)
+				   pgoff_t index, void *expected, gfp_t gfp)
 {
-	int error, nr = hpage_nr_pages(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
+	unsigned long i = 0;
+	unsigned long nr = 1UL << compound_order(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -591,46 +593,39 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->mapping = mapping;
 	page->index = index;
 
-	xa_lock_irq(&mapping->i_pages);
-	if (PageTransHuge(page)) {
-		void __rcu **results;
-		pgoff_t idx;
-		int i;
-
-		error = 0;
-		if (radix_tree_gang_lookup_slot(&mapping->i_pages,
-					&results, &idx, index, 1) &&
-				idx < index + HPAGE_PMD_NR) {
-			error = -EEXIST;
+	do {
+		void *entry;
+		xas_lock_irq(&xas);
+		entry = xas_find_conflict(&xas);
+		if (entry != expected)
+			xas_set_err(&xas, -EEXIST);
+		xas_create_range(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+next:
+		xas_store(&xas, page + i);
+		if (++i < nr) {
+			xas_next(&xas);
+			goto next;
 		}
-
-		if (!error) {
-			for (i = 0; i < HPAGE_PMD_NR; i++) {
-				error = radix_tree_insert(&mapping->i_pages,
-						index + i, page + i);
-				VM_BUG_ON(error);
-			}
+		if (PageTransHuge(page)) {
 			count_vm_event(THP_FILE_ALLOC);
+			__inc_node_page_state(page, NR_SHMEM_THPS);
 		}
-	} else if (!expected) {
-		error = radix_tree_insert(&mapping->i_pages, index, page);
-	} else {
-		error = shmem_replace_entry(mapping, index, expected, page);
-	}
-
-	if (!error) {
 		mapping->nrpages += nr;
-		if (PageTransHuge(page))
-			__inc_node_page_state(page, NR_SHMEM_THPS);
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
-		xa_unlock_irq(&mapping->i_pages);
-	} else {
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	if (xas_error(&xas)) {
 		page->mapping = NULL;
-		xa_unlock_irq(&mapping->i_pages);
 		page_ref_sub(page, nr);
+		return xas_error(&xas);
 	}
-	return error;
+
+	return 0;
 }
 
 /*
@@ -1183,7 +1178,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	 */
 	if (!error)
 		error = shmem_add_to_page_cache(*pagep, mapping, index,
-						radswap);
+						radswap, gfp);
 	if (error != -ENOMEM) {
 		/*
 		 * Truncation and eviction use free_swap_and_cache(), which
@@ -1700,7 +1695,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 						 false);
 	if (!error) {
 		error = shmem_add_to_page_cache(page, mapping, index,
-						swp_to_radix_entry(swap));
+						swp_to_radix_entry(swap), gfp);
 		/*
 		 * We already confirmed swap under page lock, and make
 		 * no memory allocation here, so usually no possibility
@@ -1806,13 +1801,8 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
 				PageTransHuge(page));
 		if (error)
 			goto unacct;
-		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
-				compound_order(page));
-		if (!error) {
-			error = shmem_add_to_page_cache(page, mapping, hindex,
-							NULL);
-			radix_tree_preload_end();
-		}
+		error = shmem_add_to_page_cache(page, mapping, hindex,
+						NULL, gfp & GFP_RECLAIM_MASK);
 		if (error) {
 			mem_cgroup_cancel_charge(page, memcg,
 					PageTransHuge(page));
@@ -2281,11 +2271,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (ret)
 		goto out_release;
 
-	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
-	if (!ret) {
-		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
-		radix_tree_preload_end();
-	}
+	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+						gfp & GFP_RECLAIM_MASK);
 	if (ret)
 		goto out_release_uncharge;
 