Commit 315227f6 authored by Linus Torvalds

Merge tag 'dax-misc-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull misc DAX updates from Vishal Verma:
 "DAX error handling for 4.7

   - Until now, dax has been disabled if media errors were found on any
     device.  This enables the use of DAX in the presence of these
     errors by making all sector-aligned zeroing go through the driver.

   - The driver (already) has the ability to clear errors on writes that
     are sent through the block layer using 'DSMs' defined in ACPI 6.1.

  Other misc changes:

   - When mounting DAX filesystems, check to make sure the partition is
     page aligned.  This is a requirement for DAX, and previously, we
     allowed such unaligned mounts to succeed, but subsequent
     reads/writes would fail.

   - Misc/cleanup fixes from Jan that remove unused code from DAX
     related to zeroing, writeback, and some size checks"
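
Since error clearing rides on ordinary writes that take the block-layer
path, a userspace illustration may help. The following is a hedged sketch,
not part of this merge: the device path, sector number, and helper name are
hypothetical. It zeroes the aligned page covering a known-bad sector with
O_DIRECT so the write reaches the pmem driver (and, where supported, the
ACPI clear-poison DSM):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* Hypothetical helper: rewrite the aligned page around a bad sector. */
    static int clear_bad_sector(const char *dev, off_t sector)
    {
            /* O_DIRECT needs aligned buffers and offsets; use one 4K page. */
            off_t off = (sector * 512) & ~((off_t)4095);
            void *buf;
            int fd, ret = -1;

            fd = open(dev, O_WRONLY | O_DIRECT);
            if (fd < 0)
                    return -1;
            if (posix_memalign(&buf, 4096, 4096))
                    goto out;
            memset(buf, 0, 4096);
            /* Writing through the driver gives it a chance to clear poison. */
            if (pwrite(fd, buf, 4096, off) == 4096)
                    ret = 0;
            free(buf);
    out:
            close(fd);
            return ret;
    }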

* tag 'dax-misc-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: fix a comment in dax_zero_page_range and dax_truncate_page
  dax: for truncate/hole-punch, do zeroing through the driver if possible
  dax: export a low-level __dax_zero_page_range helper
  dax: use sb_issue_zerout instead of calling dax_clear_sectors
  dax: enable dax in the presence of known media errors (badblocks)
  dax: fallback from pmd to pte on error
  block: Update blkdev_dax_capable() for consistency
  xfs: Add alignment check for DAX mount
  ext2: Add alignment check for DAX mount
  ext4: Add alignment check for DAX mount
  block: Add bdev_dax_supported() for dax mount checks
  block: Add vfs_msg() interface
  dax: Remove redundant inode size checks
  dax: Remove pointless writeback from dax_do_io()
  dax: Remove zeroing from dax_io()
  dax: Remove dead zeroing code from fault handlers
  ext2: Avoid DAX zeroing to corrupt data
  ext2: Fix block zeroing in ext2_get_blocks() for DAX
  dax: Remove complete_unwritten argument
  DAX: move RADIX_DAX_ definitions to dax.c
parents a10c38a4 40543f62
@@ -79,6 +79,38 @@ These filesystems may be used for inspiration:
 - ext4: the fourth extended filesystem, see Documentation/filesystems/ext4.txt
 
+Handling Media Errors
+---------------------
+
+The libnvdimm subsystem stores a record of known media error locations for
+each pmem block device (in gendisk->badblocks). If we fault at such a
+location, or one with a latent error not yet discovered, the application can
+expect to receive a SIGBUS. Libnvdimm also allows clearing of these errors by
+simply writing the affected sectors (through the pmem driver, and if the
+underlying NVDIMM supports the clear_poison DSM defined by ACPI).
+
+Since DAX IO normally doesn't go through the driver/bio path, applications or
+sysadmins have an option to restore the lost data from a prior backup/inbuilt
+redundancy in the following ways:
+
+1. Delete the affected file, and restore from a backup (sysadmin route):
+   This will free the file system blocks that were being used by the file,
+   and the next time they're allocated, they will be zeroed first, which
+   happens through the driver, and will clear bad sectors.
+
+2. Truncate or hole-punch the part of the file that has a bad-block (at least
+   an entire aligned sector has to be hole-punched, but not necessarily an
+   entire filesystem block); a userspace sketch follows this hunk.
+
+These are the two basic paths that allow DAX filesystems to continue operating
+in the presence of media errors. More robust error recovery mechanisms can be
+built on top of this in the future, for example, involving redundancy/mirroring
+provided at the block layer through DM, or additionally, at the filesystem
+level. These would have to rely on the above two tenets, that error clearing
+can happen either by sending an IO through the driver, or zeroing (also through
+the driver).
+
 Shortcomings
 ------------
......
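Option 2 above corresponds to fallocate(2) with FALLOC_FL_PUNCH_HOLE. A
minimal, hedged userspace sketch, with hypothetical path and range arguments
(the punched range must cover the bad sector and be sector aligned):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    /* Punch out the range covering a bad sector; the freed blocks are
     * zeroed through the driver on re-allocation, clearing the error.
     * KEEP_SIZE leaves the file length untouched. */
    static int punch_bad_range(const char *path, off_t offset, off_t len)
    {
            int fd = open(path, O_RDWR);
            int ret;

            if (fd < 0)
                    return -1;
            ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            offset, len);
            close(fd);
            return ret;
    }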
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-                       void __pmem **kaddr, pfn_t *pfn)
+                       void __pmem **kaddr, pfn_t *pfn, long size)
 {
         struct axon_ram_bank *bank = device->bd_disk->private_data;
         loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
......
@@ -4,7 +4,6 @@
 #include <linux/gfp.h>
 #include <linux/blkpg.h>
 #include <linux/hdreg.h>
-#include <linux/badblocks.h>
 #include <linux/backing-dev.h>
 #include <linux/fs.h>
 #include <linux/blktrace_api.h>
......
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-                void __pmem **kaddr, pfn_t *pfn)
+                void __pmem **kaddr, pfn_t *pfn, long size)
 {
         struct brd_device *brd = bdev->bd_disk->private_data;
         struct page *page;
......
@@ -164,14 +164,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 }
 
 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
-                      void __pmem **kaddr, pfn_t *pfn)
+                      void __pmem **kaddr, pfn_t *pfn, long size)
 {
         struct pmem_device *pmem = bdev->bd_queue->queuedata;
         resource_size_t offset = sector * 512 + pmem->data_offset;
 
+        if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+                return -EIO;
         *kaddr = pmem->virt_addr + offset;
         *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
+        /*
+         * If badblocks are present, limit known good range to the
+         * requested range.
+         */
+        if (unlikely(pmem->bb.count))
+                return size;
         return pmem->size - pmem->pfn_pad - offset;
 }
......
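Note the contract change above: when badblocks exist, pmem_direct_access()
reports at most the requested size, and a bad range fails with -EIO. A hedged
sketch of how a caller copes with that contract (the helper name is
hypothetical; the real consumer of this return value lives in fs/dax.c):

    /* Map one request's worth of pmem; -EIO from the driver marks a
     * known media error rather than a programming error. */
    static long example_dax_map(struct block_device *bdev, sector_t sector,
                                long size, void __pmem **addr, pfn_t *pfn)
    {
            struct blk_dax_ctl dax = {
                    .sector = sector,
                    .size = size,
            };
            long avail = bdev_direct_access(bdev, &dax);

            if (avail < 0)
                    return avail;
            *addr = dax.addr;
            *pfn = dax.pfn;
            /* With badblocks present, never assume more than requested. */
            return min(avail, size);
    }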
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static blk_qc_t dcssblk_make_request(struct request_queue *q,
                                 struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-                         void __pmem **kaddr, pfn_t *pfn);
+                         void __pmem **kaddr, pfn_t *pfn, long size);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -884,7 +884,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 static long
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-                        void __pmem **kaddr, pfn_t *pfn)
+                        void __pmem **kaddr, pfn_t *pfn, long size)
 {
         struct dcssblk_dev_info *dev_info;
         unsigned long offset, dev_sz;
......
@@ -51,6 +51,18 @@ struct block_device *I_BDEV(struct inode *inode)
 }
 EXPORT_SYMBOL(I_BDEV);
 
+void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
+{
+        struct va_format vaf;
+        va_list args;
+
+        va_start(args, fmt);
+        vaf.fmt = fmt;
+        vaf.va = &args;
+        printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
+        va_end(args);
+}
+
 static void bdev_write_inode(struct block_device *bdev)
 {
         struct inode *inode = bdev->bd_inode;
@@ -489,7 +501,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
         sector += get_start_sect(bdev);
         if (sector % (PAGE_SIZE / 512))
                 return -EINVAL;
-        avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn);
+        avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
         if (!avail)
                 return -ERANGE;
         if (avail > 0 && avail & ~PAGE_MASK)
@@ -498,6 +510,75 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
 }
 EXPORT_SYMBOL_GPL(bdev_direct_access);
 
+/**
+ * bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @sb: The superblock of the device
+ * @blocksize: The block size of the device
+ *
+ * This is a library function for filesystems to check if the block device
+ * can be mounted with dax option.
+ *
+ * Return: negative errno if unsupported, 0 if supported.
+ */
+int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+        struct blk_dax_ctl dax = {
+                .sector = 0,
+                .size = PAGE_SIZE,
+        };
+        int err;
+
+        if (blocksize != PAGE_SIZE) {
+                vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
+                return -EINVAL;
+        }
+
+        err = bdev_direct_access(sb->s_bdev, &dax);
+        if (err < 0) {
+                switch (err) {
+                case -EOPNOTSUPP:
+                        vfs_msg(sb, KERN_ERR,
+                                "error: device does not support dax");
+                        break;
+                case -EINVAL:
+                        vfs_msg(sb, KERN_ERR,
+                                "error: unaligned partition for dax");
+                        break;
+                default:
+                        vfs_msg(sb, KERN_ERR,
+                                "error: dax access failed (%d)", err);
+                }
+                return err;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(bdev_dax_supported);
+
+/**
+ * bdev_dax_capable() - Return if the raw device is capable for dax
+ * @bdev: The device for raw block device access
+ */
+bool bdev_dax_capable(struct block_device *bdev)
+{
+        struct blk_dax_ctl dax = {
+                .size = PAGE_SIZE,
+        };
+
+        if (!IS_ENABLED(CONFIG_FS_DAX))
+                return false;
+
+        dax.sector = 0;
+        if (bdev_direct_access(bdev, &dax) < 0)
+                return false;
+
+        dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
+        if (bdev_direct_access(bdev, &dax) < 0)
+                return false;
+
+        return true;
+}
+
 /*
  * pseudo-fs
  */
@@ -1160,33 +1241,6 @@ void bd_set_size(struct block_device *bdev, loff_t size)
 }
 EXPORT_SYMBOL(bd_set_size);
 
-static bool blkdev_dax_capable(struct block_device *bdev)
-{
-        struct gendisk *disk = bdev->bd_disk;
-
-        if (!disk->fops->direct_access || !IS_ENABLED(CONFIG_FS_DAX))
-                return false;
-
-        /*
-         * If the partition is not aligned on a page boundary, we can't
-         * do dax I/O to it.
-         */
-        if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512))
-                        || (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
-                return false;
-
-        /*
-         * If the device has known bad blocks, force all I/O through the
-         * driver / page cache.
-         *
-         * TODO: support finer grained dax error handling
-         */
-        if (disk->bb && disk->bb->count)
-                return false;
-
-        return true;
-}
-
 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
 
 /*
@@ -1266,7 +1320,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 
                         if (!ret) {
                                 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
-                                if (!blkdev_dax_capable(bdev))
+                                if (!bdev_dax_capable(bdev))
                                         bdev->bd_inode->i_flags &= ~S_DAX;
                         }
 
@@ -1303,7 +1357,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                 goto out_clear;
                         }
                         bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
-                        if (!blkdev_dax_capable(bdev))
+                        if (!bdev_dax_capable(bdev))
                                 bdev->bd_inode->i_flags &= ~S_DAX;
                 }
         } else {
......
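The new helper collapses each filesystem's mount-time DAX checks into one
call. A hedged sketch of the pattern the ext2/ext4/xfs hunks below adopt
(the function name and dax_requested flag are illustrative only):

    /* Abbreviated fill_super fragment: bail out of a dax mount unless
     * blocksize == PAGE_SIZE, the partition is page aligned, and
     * ->direct_access() succeeds. */
    static int example_fill_super_dax_check(struct super_block *sb,
                                            int blocksize, bool dax_requested)
    {
            if (!dax_requested)
                    return 0;
            return bdev_dax_supported(sb, blocksize);
    }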
This diff is collapsed.
@@ -51,7 +51,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
         down_read(&ei->dax_sem);
 
-        ret = __dax_fault(vma, vmf, ext2_get_block, NULL);
+        ret = __dax_fault(vma, vmf, ext2_get_block);
 
         up_read(&ei->dax_sem);
         if (vmf->flags & FAULT_FLAG_WRITE)
@@ -72,7 +72,7 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
         }
         down_read(&ei->dax_sem);
 
-        ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL);
+        ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
 
         up_read(&ei->dax_sem);
         if (flags & FAULT_FLAG_WRITE)
......
@@ -26,6 +26,7 @@
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
 #include <linux/dax.h>
+#include <linux/blkdev.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
@@ -737,19 +738,18 @@ static int ext2_get_blocks(struct inode *inode,
                  * so that it's not found by another thread before it's
                  * initialised
                  */
-                err = dax_clear_sectors(inode->i_sb->s_bdev,
-                                le32_to_cpu(chain[depth-1].key) <<
-                                (inode->i_blkbits - 9),
-                                1 << inode->i_blkbits);
+                err = sb_issue_zeroout(inode->i_sb,
+                                le32_to_cpu(chain[depth-1].key), count,
+                                GFP_NOFS);
                 if (err) {
                         mutex_unlock(&ei->truncate_mutex);
                         goto cleanup;
                 }
-        }
+        } else
+                set_buffer_new(bh_result);
 
         ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
         mutex_unlock(&ei->truncate_mutex);
-        set_buffer_new(bh_result);
 got_it:
         map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
         if (count > blocks_to_boundary)
......
@@ -922,17 +922,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
         blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
         if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
-                if (blocksize != PAGE_SIZE) {
-                        ext2_msg(sb, KERN_ERR,
-                                        "error: unsupported blocksize for dax");
-                        goto failed_mount;
-                }
-                if (!sb->s_bdev->bd_disk->fops->direct_access) {
-                        ext2_msg(sb, KERN_ERR,
-                                        "error: device does not support dax");
+                err = bdev_dax_supported(sb, blocksize);
+                if (err)
                         goto failed_mount;
-                }
         }
 
         /* If the blocksize doesn't match, re-read the thing.. */
         if (sb->s_blocksize != blocksize) {
......
@@ -202,7 +202,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         if (IS_ERR(handle))
                 result = VM_FAULT_SIGBUS;
         else
-                result = __dax_fault(vma, vmf, ext4_dax_get_block, NULL);
+                result = __dax_fault(vma, vmf, ext4_dax_get_block);
 
         if (write) {
                 if (!IS_ERR(handle))
@@ -238,7 +238,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                 result = VM_FAULT_SIGBUS;
         else
                 result = __dax_pmd_fault(vma, addr, pmd, flags,
-                                         ext4_dax_get_block, NULL);
+                                         ext4_dax_get_block);
 
         if (write) {
                 if (!IS_ERR(handle))
......
@@ -3417,17 +3417,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         }
 
         if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
-                if (blocksize != PAGE_SIZE) {
-                        ext4_msg(sb, KERN_ERR,
-                                "error: unsupported blocksize for dax");
-                        goto failed_mount;
-                }
-                if (!sb->s_bdev->bd_disk->fops->direct_access) {
-                        ext4_msg(sb, KERN_ERR,
-                                "error: device does not support dax");
+                err = bdev_dax_supported(sb, blocksize);
+                if (err)
                         goto failed_mount;
-                }
         }
 
         if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
                 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
......
@@ -72,18 +72,11 @@ xfs_zero_extent(
         struct xfs_mount        *mp = ip->i_mount;
         xfs_daddr_t             sector = xfs_fsb_to_db(ip, start_fsb);
         sector_t                block = XFS_BB_TO_FSBT(mp, sector);
-        ssize_t                 size = XFS_FSB_TO_B(mp, count_fsb);
-
-        if (IS_DAX(VFS_I(ip)))
-                return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
-                                sector, size);
-
-        /*
-         * let the block layer decide on the fastest method of
-         * implementing the zeroing.
-         */
-        return sb_issue_zeroout(mp->m_super, block, count_fsb, GFP_NOFS);
+
+        return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+                block << (mp->m_super->s_blocksize_bits - 9),
+                count_fsb << (mp->m_super->s_blocksize_bits - 9),
+                GFP_NOFS, true);
 }
 
 /*
......
@@ -1551,7 +1551,7 @@ xfs_filemap_page_mkwrite(
         xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
         if (IS_DAX(inode)) {
-                ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
+                ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
         } else {
                 ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
                 ret = block_page_mkwrite_return(ret);
@@ -1585,7 +1585,7 @@ xfs_filemap_fault(
                  * changes to xfs_get_blocks_direct() to map unwritten extent
                  * ioend for conversion on read-only mappings.
                  */
-                ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
+                ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
         } else
                 ret = filemap_fault(vma, vmf);
         xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1622,8 +1622,7 @@ xfs_filemap_pmd_fault(
         }
 
         xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-        ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
-                              NULL);
+        ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
         xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
         if (flags & FAULT_FLAG_WRITE)
......
@@ -1556,13 +1556,11 @@ xfs_fs_fill_super(
         if (mp->m_flags & XFS_MOUNT_DAX) {
                 xfs_warn(mp,
                 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
-                if (sb->s_blocksize != PAGE_SIZE) {
-                        xfs_alert(mp,
-                "Filesystem block size invalid for DAX Turning DAX off.");
-                        mp->m_flags &= ~XFS_MOUNT_DAX;
-                } else if (!sb->s_bdev->bd_disk->fops->direct_access) {
+
+                error = bdev_dax_supported(sb, sb->s_blocksize);
+                if (error) {
                         xfs_alert(mp,
-                "Block device does not support DAX Turning DAX off.");
+                "DAX unsupported by block device. Turning off DAX.");
                         mp->m_flags &= ~XFS_MOUNT_DAX;
                 }
         }
......
@@ -768,6 +768,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
 }
 #endif
 
+#ifdef CONFIG_PRINTK
+#define vfs_msg(sb, level, fmt, ...)                            \
+        __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
+#else
+#define vfs_msg(sb, level, fmt, ...)                            \
+do {                                                            \
+        no_printk(fmt, ##__VA_ARGS__);                          \
+        __vfs_msg(sb, "", " ");                                 \
+} while (0)
+#endif
+
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern blk_qc_t generic_make_request(struct bio *bio);
@@ -1660,7 +1671,7 @@ struct block_device_operations {
         int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
         int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
         long (*direct_access)(struct block_device *, sector_t, void __pmem **,
-                        pfn_t *);
+                        pfn_t *, long);
         unsigned int (*check_events) (struct gendisk *disk,
                                       unsigned int clearing);
         /* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1680,6 +1691,8 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
                                                 struct writeback_control *);
 extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
+extern int bdev_dax_supported(struct super_block *, int);
+extern bool bdev_dax_capable(struct block_device *);
 #else /* CONFIG_BLOCK */
 
 struct block_device;
......
@@ -7,41 +7,44 @@
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
                   get_block_t, dio_iodone_t, int flags);
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
-                dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
-                dax_iodone_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+                unsigned int offset, unsigned int length);
 #else
 static inline struct page *read_dax_sector(struct block_device *bdev,
                 sector_t n)
 {
         return ERR_PTR(-ENXIO);
 }
+static inline int __dax_zero_page_range(struct block_device *bdev,
+                sector_t sector, unsigned int offset, unsigned int length)
+{
+        return -ENXIO;
+}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-                unsigned int flags, get_block_t, dax_iodone_t);
+                unsigned int flags, get_block_t);
 int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-                unsigned int flags, get_block_t, dax_iodone_t);
+                unsigned int flags, get_block_t);
 #else
 static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-                                pmd_t *pmd, unsigned int flags, get_block_t gb,
-                                dax_iodone_t di)
+                                pmd_t *pmd, unsigned int flags, get_block_t gb)
 {
         return VM_FAULT_FALLBACK;
 }
 #define __dax_pmd_fault dax_pmd_fault
 #endif
 int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod)          dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod)        __dax_fault(vma, vmf, gb, iod)
+#define dax_mkwrite(vma, vmf, gb)       dax_fault(vma, vmf, gb)
+#define __dax_mkwrite(vma, vmf, gb)     __dax_fault(vma, vmf, gb)
 
 static inline bool vma_is_dax(struct vm_area_struct *vma)
 {
......
@@ -74,7 +74,6 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
                         struct buffer_head *bh_result, int create);
 typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
                         ssize_t bytes, void *private);
-typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
 
 #define MAY_EXEC                0x00000001
 #define MAY_WRITE               0x00000002
......