Commit 89eb946a authored by Matthew Wilcox

mm: Convert page migration to XArray

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 560d454b
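
Note (not part of the commit): the conversion in the diff below follows one pattern throughout. An on-stack XA_STATE cursor replaces the repeated radix_tree_lookup_slot()/radix_tree_replace_slot() pairs, and xas_load(), xas_store() and xas_next() operate relative to that cursor while xas_lock_irq() holds the i_pages lock. The sketch that follows shows only that pattern in isolation; the helper name replace_pages_in_mapping() and the trimmed refcount-freeze and statistics handling are illustrative assumptions, not code from this change.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/*
 * Illustrative sketch only: replace @page (and its tail pages, if any)
 * with @newpage in mapping->i_pages using a single XArray cursor, the
 * same load/verify/store sequence the diff below switches to.
 */
static int replace_pages_in_mapping(struct address_space *mapping,
                                    struct page *page, struct page *newpage,
                                    int nr_pages)
{
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        int i;

        xas_lock_irq(&xas);             /* takes the i_pages lock */

        if (xas_load(&xas) != page) {   /* re-check the slot under the lock */
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        xas_store(&xas, newpage);       /* replace the head page */
        for (i = 1; i < nr_pages; i++) {
                xas_next(&xas);         /* advance the cursor by one index */
                xas_store(&xas, newpage + i);
        }

        xas_unlock_irq(&xas);
        return 0;
}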
@@ -323,7 +323,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	page = migration_entry_to_page(entry);
 
 	/*
-	 * Once radix-tree replacement of page migration started, page_count
+	 * Once page cache replacement of page migration started, page_count
 	 * *must* be zero. And, we don't want to call wait_on_page_locked()
 	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even failed, page fault
@@ -438,10 +438,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
+	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = 1 + extra_count;
-	void **pslot;
 
 	/*
 	 * Device public or private pages have an extra refcount as they are
@@ -467,21 +467,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	oldzone = page_zone(page);
 	newzone = page_zone(newpage);
 
-	xa_lock_irq(&mapping->i_pages);
-
-	pslot = radix_tree_lookup_slot(&mapping->i_pages,
-					page_index(page));
+	xas_lock_irq(&xas);
 
 	expected_count += hpage_nr_pages(page) + page_has_private(page);
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot,
-					&mapping->i_pages.xa_lock) != page) {
-		xa_unlock_irq(&mapping->i_pages);
+	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	if (!page_ref_freeze(page, expected_count)) {
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
@@ -495,7 +490,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (mode == MIGRATE_ASYNC && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
 		page_ref_unfreeze(page, expected_count);
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
@@ -523,16 +518,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		SetPageDirty(newpage);
 	}
 
-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	xas_store(&xas, newpage);
 	if (PageTransHuge(page)) {
 		int i;
-		int index = page_index(page);
 
 		for (i = 1; i < HPAGE_PMD_NR; i++) {
-			pslot = radix_tree_lookup_slot(&mapping->i_pages,
-						       index + i);
-			radix_tree_replace_slot(&mapping->i_pages, pslot,
-						newpage + i);
+			xas_next(&xas);
+			xas_store(&xas, newpage + i);
 		}
 	}
 
@@ -543,7 +535,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
 
-	xa_unlock(&mapping->i_pages);
+	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
 
 	/*
@@ -583,22 +575,18 @@ EXPORT_SYMBOL(migrate_page_move_mapping);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct page *newpage, struct page *page)
 {
+	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	int expected_count;
-	void **pslot;
-
-	xa_lock_irq(&mapping->i_pages);
-
-	pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));
 
+	xas_lock_irq(&xas);
 	expected_count = 2 + page_has_private(page);
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
-		xa_unlock_irq(&mapping->i_pages);
+	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	if (!page_ref_freeze(page, expected_count)) {
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
@@ -607,11 +595,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
 	get_page(newpage);
 
-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	xas_store(&xas, newpage);
 
 	page_ref_unfreeze(page, expected_count - 1);
 
-	xa_unlock_irq(&mapping->i_pages);
+	xas_unlock_irq(&xas);
 
 	return MIGRATEPAGE_SUCCESS;
 }