Commit f4013ca6 authored by Vishal Verma's avatar Vishal Verma

Merge branch 'for-5.9/dax' into libnvdimm-for-next

This contains a handful of dax changes for v5.9. Of the three commits,
one is a print verbosity change, and two are independent fixes that fell
out of the PKS work [1].

[1]: https://lore.kernel.org/linux-nvdimm/20200717072056.73134-1-ira.weiny@intel.com
parents 48778464 eedfd73d
...@@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, ...@@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
int err, id; int err, id;
if (blocksize != PAGE_SIZE) { if (blocksize != PAGE_SIZE) {
pr_debug("%s: error: unsupported blocksize for dax\n", pr_info("%s: error: unsupported blocksize for dax\n",
bdevname(bdev, buf)); bdevname(bdev, buf));
return false; return false;
} }
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff); err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) { if (err) {
pr_debug("%s: error: unaligned partition for dax\n", pr_info("%s: error: unaligned partition for dax\n",
bdevname(bdev, buf)); bdevname(bdev, buf));
return false; return false;
} }
...@@ -95,7 +95,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, ...@@ -95,7 +95,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512; last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end); err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
if (err) { if (err) {
pr_debug("%s: error: unaligned partition for dax\n", pr_info("%s: error: unaligned partition for dax\n",
bdevname(bdev, buf)); bdevname(bdev, buf));
return false; return false;
} }
...@@ -103,11 +103,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, ...@@ -103,11 +103,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
id = dax_read_lock(); id = dax_read_lock();
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn); len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
dax_read_unlock(id);
if (len < 1 || len2 < 1) { if (len < 1 || len2 < 1) {
pr_debug("%s: error: dax access failed (%ld)\n", pr_info("%s: error: dax access failed (%ld)\n",
bdevname(bdev, buf), len < 1 ? len : len2); bdevname(bdev, buf), len < 1 ? len : len2);
dax_read_unlock(id);
return false; return false;
} }
...@@ -137,9 +137,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, ...@@ -137,9 +137,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
put_dev_pagemap(end_pgmap); put_dev_pagemap(end_pgmap);
} }
dax_read_unlock(id);
if (!dax_enabled) { if (!dax_enabled) {
pr_debug("%s: error: dax support not enabled\n", pr_info("%s: error: dax support not enabled\n",
bdevname(bdev, buf)); bdevname(bdev, buf));
return false; return false;
} }
......
...@@ -680,21 +680,20 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, ...@@ -680,21 +680,20 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
return __dax_invalidate_entry(mapping, index, false); return __dax_invalidate_entry(mapping, index, false);
} }
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
sector_t sector, size_t size, struct page *to, sector_t sector, struct page *to, unsigned long vaddr)
unsigned long vaddr)
{ {
void *vto, *kaddr; void *vto, *kaddr;
pgoff_t pgoff; pgoff_t pgoff;
long rc; long rc;
int id; int id;
rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
if (rc) if (rc)
return rc; return rc;
id = dax_read_lock(); id = dax_read_lock();
rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL); rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
if (rc < 0) { if (rc < 0) {
dax_read_unlock(id); dax_read_unlock(id);
return rc; return rc;
...@@ -1305,8 +1304,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, ...@@ -1305,8 +1304,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
clear_user_highpage(vmf->cow_page, vaddr); clear_user_highpage(vmf->cow_page, vaddr);
break; break;
case IOMAP_MAPPED: case IOMAP_MAPPED:
error = copy_user_dax(iomap.bdev, iomap.dax_dev, error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
sector, PAGE_SIZE, vmf->cow_page, vaddr); sector, vmf->cow_page, vaddr);
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment