Commit 73449daf authored by Dan Williams, committed by Dave Jiang

filesystem-dax: Set page->index

In support of enabling memory_failure() handling for filesystem-dax
mappings, set ->index to the pgoff of the page. The rmap implementation
requires ->index to bound the search through the vma interval tree. The
index is set and cleared at dax_associate_entry() and
dax_disassociate_entry() time respectively.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
parent 35de2995
@@ -319,18 +319,27 @@ static unsigned long dax_radix_end_pfn(void *entry)
for (pfn = dax_radix_pfn(entry); \ for (pfn = dax_radix_pfn(entry); \
pfn < dax_radix_end_pfn(entry); pfn++) pfn < dax_radix_end_pfn(entry); pfn++)
static void dax_associate_entry(void *entry, struct address_space *mapping) /*
* TODO: for reflink+dax we need a way to associate a single page with
* multiple address_space instances at different linear_page_index()
* offsets.
*/
static void dax_associate_entry(void *entry, struct address_space *mapping,
struct vm_area_struct *vma, unsigned long address)
{ {
unsigned long pfn; unsigned long size = dax_entry_size(entry), pfn, index;
int i = 0;
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return; return;
index = linear_page_index(vma, address & ~(size - 1));
for_each_mapped_pfn(entry, pfn) { for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn); struct page *page = pfn_to_page(pfn);
WARN_ON_ONCE(page->mapping); WARN_ON_ONCE(page->mapping);
page->mapping = mapping; page->mapping = mapping;
page->index = index + i++;
} }
} }
...@@ -348,6 +357,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping, ...@@ -348,6 +357,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
WARN_ON_ONCE(trunc && page_ref_count(page) > 1); WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
WARN_ON_ONCE(page->mapping && page->mapping != mapping); WARN_ON_ONCE(page->mapping && page->mapping != mapping);
page->mapping = NULL; page->mapping = NULL;
page->index = 0;
} }
} }
...@@ -701,7 +711,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping, ...@@ -701,7 +711,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
new_entry = dax_radix_locked_entry(pfn, flags); new_entry = dax_radix_locked_entry(pfn, flags);
if (dax_entry_size(entry) != dax_entry_size(new_entry)) { if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
dax_disassociate_entry(entry, mapping, false); dax_disassociate_entry(entry, mapping, false);
dax_associate_entry(new_entry, mapping); dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
} }
if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment