Commit cc4a90ac authored by Matthew Wilcox, committed by Dan Williams

dax: dax_insert_mapping_entry always succeeds

It does not return an error, so we don't need to check the return value
for IS_ERR().  Indeed, it is a bug to do so; with a sufficiently large
PFN, a legitimate DAX entry may be mistaken for an error return.
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent d6dc57e2
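
For context, a minimal user-space sketch of why the IS_ERR() check can misfire. Assumptions: MAX_ERRNO and the comparison mirror include/linux/err.h, which treats the top 4095 pointer values as error codes; the RADIX_DAX_SHIFT value and the entry packing here are illustrative stand-ins, not copied from fs/dax.c.

#include <stdio.h>

#define MAX_ERRNO	4095UL
/* Same test err.h's IS_ERR() performs on a pointer value. */
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
#define RADIX_DAX_SHIFT	4	/* illustrative shift; the real packing differs */

int main(void)
{
	/* A large but otherwise legitimate PFN packed into a DAX-style entry. */
	unsigned long pfn = (unsigned long)-1 >> RADIX_DAX_SHIFT;
	unsigned long entry = pfn << RADIX_DAX_SHIFT;

	/* Prints 1: the packed entry lands in the range IS_ERR() calls an error. */
	printf("%d\n", (int)IS_ERR_VALUE(entry));
	return 0;
}

Since dax_insert_mapping_entry() always returns a valid entry, the callers below simply drop the checks.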
fs/dax.c
@@ -1009,7 +1009,6 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	unsigned long vaddr = vmf->address;
 	int ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
-	void *entry2;
 	pfn_t pfn;
 
 	zero_page = ZERO_PAGE(0);
@@ -1019,13 +1018,8 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	}
 
 	pfn = page_to_pfn_t(zero_page);
-	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
-			RADIX_DAX_ZERO_PAGE, false);
-	if (IS_ERR(entry2)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out;
-	}
-
+	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
+			false);
 	vm_insert_mixed(vmf->vma, vaddr, pfn);
 out:
 	trace_dax_load_hole(inode, vmf, ret);
@@ -1337,10 +1331,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 						 0, write && !sync);
-		if (IS_ERR(entry)) {
-			error = PTR_ERR(entry);
-			goto error_finish_iomap;
-		}
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,
@@ -1424,8 +1414,6 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	pfn = page_to_pfn_t(zero_page);
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
-	if (IS_ERR(ret))
-		goto fallback;
 
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
 	if (!pmd_none(*(vmf->pmd))) {
@@ -1547,8 +1535,6 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 						RADIX_DAX_PMD, write && !sync);
-		if (IS_ERR(entry))
-			goto finish_iomap;
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,