Commit e890038e authored by Linus Torvalds


Merge tag 'xfs-fixes-for-linus-4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs

Pull xfs fixes from Dave Chinner:
 "This update contains fixes for most of the outstanding regressions
  introduced with the 4.9-rc1 XFS merge. There is also a fix for an
   iomap bug.

  This is quite a bit larger than I'd prefer for a -rc3, but most of
  the change comes from cleaning up the new reflink copy on write code;
  it's much simpler and easier to understand now. These changes fixed
  several bugs in the new code, and it wasn't clear that there was an
  easier/simpler way to fix them. The rest of the fixes are the usual
  size you'd expect at this stage.

  I've left the commits to soak in linux-next for some extra time
  because of the size before asking you to pull; no new problems with
  them have been reported, so I think it's all OK.

  Summary:
   - iomap page offset masking fix for page faults
   - add IOMAP_REPORT to distinguish between read and fiemap map
     requests
   - cleanups to new shared data extent code
   - fix mount active status on failed log recovery
   - fix broken dquots in a buffer calculation
   - fix locking order issues and merge xfs_reflink_remap_range and
     xfs_file_share_range
   - rework unmapping of CoW extents and remove now unused functions
   - clean state when CoW is done"

* tag 'xfs-fixes-for-linus-4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs: (25 commits)
  xfs: clear cowblocks tag when cow fork is emptied
  xfs: fix up inode cowblocks tracking tracepoints
  fs: Do not trim high file position bits in iomap_page_mkwrite_actor
  xfs: remove xfs_bunmapi_cow
  xfs: optimize xfs_reflink_end_cow
  xfs: optimize xfs_reflink_cancel_cow_blocks
  xfs: refactor xfs_bunmapi_cow
  xfs: optimize writes to reflink files
  xfs: don't bother looking at the refcount tree for reads
  xfs: handle "raw" delayed extents in xfs_reflink_trim_around_shared
  xfs: add xfs_trim_extent
  iomap: add IOMAP_REPORT
  xfs: merge xfs_reflink_remap_range and xfs_file_share_range
  xfs: remove xfs_file_wait_for_io
  xfs: move inode locking from xfs_reflink_remap_range to xfs_file_share_range
  xfs: fix the same_inode check in xfs_file_share_range
  xfs: remove the same fs check from xfs_file_share_range
  libxfs: v3 inodes are only valid on crc-enabled filesystems
  libxfs: clean up _calc_dquots_per_chunk
  xfs: unset MS_ACTIVE if mount fails
  ...
parents 18c2152d c17a8ef4
@@ -433,8 +433,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
 	struct page *page = data;
 	int ret;
 
-	ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
-			NULL, iomap);
+	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
 	if (ret)
 		return ret;
@@ -561,7 +560,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
 	}
 
 	while (len > 0) {
-		ret = iomap_apply(inode, start, len, 0, ops, &ctx,
+		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
 				iomap_fiemap_actor);
 		/* inode with no (attribute) mapping will give ENOENT */
 		if (ret == -ENOENT)
...
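
The mkwrite fix above boils down to an arithmetic bug: PAGE_MASK is ~(PAGE_SIZE - 1), so `pos & ~PAGE_MASK` keeps only the offset within the faulting page and throws away which page in the file was faulted. A minimal user-space sketch of the arithmetic (4k pages and the sample position are assumptions for illustration, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pos = (1UL << 20) + 300;	/* fault at 1 MiB + 300 bytes */

	/* Old, buggy argument: only the offset within the page survives. */
	unsigned long trimmed = pos & ~PAGE_MASK;	/* == 300 */

	/* Fixed argument: the full file position is passed through. */
	unsigned long fixed = pos;			/* == 1048876 */

	printf("trimmed=%lu fixed=%lu\n", trimmed, fixed);
	return 0;
}
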
This diff is collapsed.
@@ -190,6 +190,8 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 #define	XFS_BMAP_TRACE_EXLIST(ip,c,w)
 #endif
 
+void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
+		xfs_filblks_t len);
 int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void	xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
@@ -221,7 +223,11 @@ int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
 		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
 		xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
 		struct xfs_defer_ops *dfops, int *done);
-int	xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del);
+int	xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
+		xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
+		struct xfs_bmbt_irec *del);
+void	xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
+		struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
 int	xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
 		xfs_extnum_t num);
 uint	xfs_default_attroffset(struct xfs_inode *ip);
...
@@ -4826,7 +4826,7 @@ xfs_btree_calc_size(
 	return rval;
 }
 
-int
+static int
 xfs_btree_count_blocks_helper(
 	struct xfs_btree_cur	*cur,
 	int			level,
...
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
 	if (mp->m_quotainfo)
 		ndquots = mp->m_quotainfo->qi_dqperchunk;
 	else
-		ndquots = xfs_calc_dquots_per_chunk(
-					XFS_BB_TO_FSB(mp, bp->b_length));
+		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
 
 	for (i = 0; i < ndquots; i++, d++) {
 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
...
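
The dquot change is a units fix: a buffer's b_length is counted in 512-byte basic blocks, which appears to be what xfs_calc_dquots_per_chunk() expects, so converting it to filesystem blocks first made the verifier walk only a fraction of the dquots in the buffer. A rough user-space sketch of the mismatch (the 136-byte on-disk dquot size and 4k filesystem block are assumptions for illustration):

#include <stdio.h>

#define BBSHIFT		9			/* 512-byte basic blocks */
#define BBTOB(bbs)	((bbs) << BBSHIFT)
#define DQBLK_SIZE	136			/* assumed on-disk dquot size */

/* Mirrors the helper's contract: the length argument is in basic blocks. */
static unsigned int calc_dquots_per_chunk(unsigned int nbblks)
{
	return BBTOB(nbblks) / DQBLK_SIZE;
}

int main(void)
{
	unsigned int b_length = 8;		/* one 4k fs block == 8 BBs */

	/* The old call site converted BBs to fs blocks first (divide by 8),
	 * so the verifier checked far too few dquots. */
	printf("buggy: %u dquots\n", calc_dquots_per_chunk(b_length / 8));
	printf("fixed: %u dquots\n", calc_dquots_per_chunk(b_length));
	return 0;
}
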
@@ -865,7 +865,6 @@ typedef struct xfs_timestamp {
  * padding field for v3 inodes.
  */
 #define	XFS_DINODE_MAGIC		0x494e	/* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v)	((v) >= 1 && (v) <= 3)
 
 typedef struct xfs_dinode {
 	__be16		di_magic;	/* inode magic # = XFS_DINODE_MAGIC */
 	__be16		di_mode;	/* mode and type of file */
...
@@ -57,6 +57,17 @@ xfs_inobp_check(
 }
 #endif
 
+bool
+xfs_dinode_good_version(
+	struct xfs_mount *mp,
+	__u8		version)
+{
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		return version == 3;
+
+	return version == 1 || version == 2;
+}
+
 /*
  * If we are doing readahead on an inode buffer, we might be in log recovery
  * reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -91,7 +102,7 @@ xfs_inode_buf_verify(
 		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
 		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
-			XFS_DINODE_GOOD_VERSION(dip->di_version);
+			xfs_dinode_good_version(mp, dip->di_version);
 		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
 					XFS_ERRTAG_ITOBP_INOTOBP,
 					XFS_RANDOM_ITOBP_INOTOBP))) {
...
@@ -74,6 +74,8 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
 void	xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
 			       struct xfs_dinode *to);
 
+bool	xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
+
 #if defined(DEBUG)
 void	xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
 #else
...
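
The helper is stricter than the macro it replaces: XFS_DINODE_GOOD_VERSION(v) accepted versions 1 through 3 on any filesystem, while xfs_dinode_good_version() cross-checks the inode version against the superblock features, so v3 inodes are only accepted on CRC-enabled (v5) filesystems and v1/v2 only on non-CRC ones. A standalone sketch of the predicate, with the CRC feature bit passed as a plain bool in place of xfs_sb_version_hascrc(&mp->m_sb):

#include <stdbool.h>
#include <stdio.h>

/* Same logic as the new helper, feature bit passed in directly. */
static bool dinode_good_version(bool hascrc, unsigned char version)
{
	if (hascrc)
		return version == 3;
	return version == 1 || version == 2;
}

int main(void)
{
	/* The old macro accepted all of these; the new check rejects a v3
	 * inode on a non-CRC fs and a v1/v2 inode on a CRC fs. */
	printf("crc fs, v3 inode:     %d\n", dinode_good_version(true, 3));  /* 1 */
	printf("crc fs, v2 inode:     %d\n", dinode_good_version(true, 2));  /* 0 */
	printf("non-crc fs, v3 inode: %d\n", dinode_good_version(false, 3)); /* 0 */
	return 0;
}
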
@@ -249,6 +249,7 @@ xfs_file_dio_aio_read(
 	struct xfs_inode	*ip = XFS_I(inode);
 	loff_t			isize = i_size_read(inode);
 	size_t			count = iov_iter_count(to);
+	loff_t			end = iocb->ki_pos + count - 1;
 	struct iov_iter		data;
 	struct xfs_buftarg	*target;
 	ssize_t			ret = 0;
@@ -272,49 +273,21 @@ xfs_file_dio_aio_read(
 
 	file_accessed(iocb->ki_filp);
 
-	/*
-	 * Locking is a bit tricky here. If we take an exclusive lock for direct
-	 * IO, we effectively serialise all new concurrent read IO to this file
-	 * and block it behind IO that is currently in progress because IO in
-	 * progress holds the IO lock shared. We only need to hold the lock
-	 * exclusive to blow away the page cache, so only take lock exclusively
-	 * if the page cache needs invalidation. This allows the normal direct
-	 * IO case of no page cache pages to proceeed concurrently without
-	 * serialisation.
-	 */
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 	if (mapping->nrpages) {
-		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+		if (ret)
+			goto out_unlock;
 
 		/*
-		 * The generic dio code only flushes the range of the particular
-		 * I/O. Because we take an exclusive lock here, this whole
-		 * sequence is considerably more expensive for us. This has a
-		 * noticeable performance impact for any file with cached pages,
-		 * even when outside of the range of the particular I/O.
-		 *
-		 * Hence, amortize the cost of the lock against a full file
-		 * flush and reduce the chances of repeated iolock cycles going
-		 * forward.
+		 * Invalidate whole pages. This can return an error if we fail
+		 * to invalidate a page, but this should never happen on XFS.
+		 * Warn if it does fail.
 		 */
-		if (mapping->nrpages) {
-			ret = filemap_write_and_wait(mapping);
-			if (ret) {
-				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
-				return ret;
-			}
-
-			/*
-			 * Invalidate whole pages. This can return an error if
-			 * we fail to invalidate a page, but this should never
-			 * happen on XFS. Warn if it does fail.
-			 */
-			ret = invalidate_inode_pages2(mapping);
-			WARN_ON_ONCE(ret);
-			ret = 0;
-		}
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		ret = invalidate_inode_pages2_range(mapping,
+				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+		WARN_ON_ONCE(ret);
+		ret = 0;
 	}
 
 	data = *to;
@@ -324,8 +297,9 @@ xfs_file_dio_aio_read(
 		iocb->ki_pos += ret;
 		iov_iter_advance(to, ret);
 	}
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
+out_unlock:
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
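
Both the read and write paths now flush and invalidate just the byte range covered by the I/O rather than the whole file. The conversion from a byte range to the inclusive page-index range that invalidate_inode_pages2_range() takes is a pair of shifts; a small sketch, assuming 4k pages and made-up I/O values:

#include <stdio.h>

#define PAGE_SHIFT 12				/* 4k pages assumed */

int main(void)
{
	unsigned long pos = 4096UL * 10 + 100;	/* I/O starts inside page 10 */
	unsigned long count = 4096UL * 3;	/* and spans into page 13 */
	unsigned long end = pos + count - 1;	/* inclusive last byte */

	/* Only pages 10..13 are written back and invalidated now, not the
	 * whole file as with the old filemap_write_and_wait(). */
	printf("first page index: %lu\n", pos >> PAGE_SHIFT);
	printf("last page index:  %lu\n", end >> PAGE_SHIFT);
	return 0;
}
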
@@ -570,61 +544,49 @@ xfs_file_dio_aio_write(
 	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 		return -EINVAL;
 
-	/* "unaligned" here means not aligned to a filesystem block */
-	if ((iocb->ki_pos & mp->m_blockmask) ||
-	    ((iocb->ki_pos + count) & mp->m_blockmask))
-		unaligned_io = 1;
-
 	/*
-	 * We don't need to take an exclusive lock unless there page cache needs
-	 * to be invalidated or unaligned IO is being executed. We don't need to
-	 * consider the EOF extension case here because
-	 * xfs_file_aio_write_checks() will relock the inode as necessary for
-	 * EOF zeroing cases and fill out the new inode size as appropriate.
+	 * Don't take the exclusive iolock here unless the I/O is unaligned to
+	 * the file system block size.  We don't need to consider the EOF
+	 * extension case here because xfs_file_aio_write_checks() will relock
+	 * the inode as necessary for EOF zeroing cases and fill out the new
+	 * inode size as appropriate.
 	 */
-	if (unaligned_io || mapping->nrpages)
+	if ((iocb->ki_pos & mp->m_blockmask) ||
+	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
+		unaligned_io = 1;
 		iolock = XFS_IOLOCK_EXCL;
-	else
+	} else {
 		iolock = XFS_IOLOCK_SHARED;
-	xfs_rw_ilock(ip, iolock);
-
-	/*
-	 * Recheck if there are cached pages that need invalidate after we got
-	 * the iolock to protect against other threads adding new pages while
-	 * we were waiting for the iolock.
-	 */
-	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
-		xfs_rw_iunlock(ip, iolock);
-		iolock = XFS_IOLOCK_EXCL;
-		xfs_rw_ilock(ip, iolock);
 	}
 
+	xfs_rw_ilock(ip, iolock);
+
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
 	count = iov_iter_count(from);
 	end = iocb->ki_pos + count - 1;
 
-	/*
-	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
-	 */
 	if (mapping->nrpages) {
-		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
 		if (ret)
 			goto out;
+
 		/*
 		 * Invalidate whole pages. This can return an error if we fail
 		 * to invalidate a page, but this should never happen on XFS.
 		 * Warn if it does fail.
 		 */
-		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+		ret = invalidate_inode_pages2_range(mapping,
+				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
 		WARN_ON_ONCE(ret);
 		ret = 0;
 	}
 
 	/*
 	 * If we are doing unaligned IO, wait for all other IO to drain,
-	 * otherwise demote the lock if we had to flush cached pages
+	 * otherwise demote the lock if we had to take the exclusive lock
+	 * for other reasons in xfs_file_aio_write_checks.
 	 */
 	if (unaligned_io)
 		inode_dio_wait(inode);
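
The rewritten alignment check folds the old two-step logic into one branch: the write takes the exclusive iolock only if either end of it falls inside a filesystem block. A worked example of the mask test, assuming 4k filesystem blocks (the sample offsets are made up):

#include <stdio.h>

int main(void)
{
	unsigned long m_blockmask = 4096 - 1;	/* 4k fs blocks assumed */
	struct { unsigned long pos, count; } io[] = {
		{ 4096, 4096 },	/* both ends block aligned -> shared lock */
		{ 4096,  100 },	/* end lands mid-block     -> exclusive  */
		{  100, 4096 },	/* start lands mid-block   -> exclusive  */
	};

	for (int i = 0; i < 3; i++) {
		int unaligned = (io[i].pos & m_blockmask) ||
				((io[i].pos + io[i].count) & m_blockmask);

		printf("pos=%4lu count=%4lu -> %s\n", io[i].pos, io[i].count,
		       unaligned ? "XFS_IOLOCK_EXCL" : "XFS_IOLOCK_SHARED");
	}
	return 0;
}
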
@@ -947,134 +909,6 @@ xfs_file_fallocate(
 	return error;
 }
 
-/*
- * Flush all file writes out to disk.
- */
-static int
-xfs_file_wait_for_io(
-	struct inode	*inode,
-	loff_t		offset,
-	size_t		len)
-{
-	loff_t		rounding;
-	loff_t		ioffset;
-	loff_t		iendoffset;
-	loff_t		bs;
-	int		ret;
-
-	bs = inode->i_sb->s_blocksize;
-	inode_dio_wait(inode);
-
-	rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
-	ioffset = round_down(offset, rounding);
-	iendoffset = round_up(offset + len, rounding) - 1;
-	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
-					   iendoffset);
-	return ret;
-}
-
-/* Hook up to the VFS reflink function */
-STATIC int
-xfs_file_share_range(
-	struct file	*file_in,
-	loff_t		pos_in,
-	struct file	*file_out,
-	loff_t		pos_out,
-	u64		len,
-	bool		is_dedupe)
-{
-	struct inode	*inode_in;
-	struct inode	*inode_out;
-	ssize_t		ret;
-	loff_t		bs;
-	loff_t		isize;
-	int		same_inode;
-	loff_t		blen;
-	unsigned int	flags = 0;
-
-	inode_in = file_inode(file_in);
-	inode_out = file_inode(file_out);
-	bs = inode_out->i_sb->s_blocksize;
-
-	/* Don't touch certain kinds of inodes */
-	if (IS_IMMUTABLE(inode_out))
-		return -EPERM;
-	if (IS_SWAPFILE(inode_in) ||
-	    IS_SWAPFILE(inode_out))
-		return -ETXTBSY;
-
-	/* Reflink only works within this filesystem. */
-	if (inode_in->i_sb != inode_out->i_sb)
-		return -EXDEV;
-	same_inode = (inode_in->i_ino == inode_out->i_ino);
-
-	/* Don't reflink dirs, pipes, sockets... */
-	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
-		return -EISDIR;
-	if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
-		return -EINVAL;
-	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
-		return -EINVAL;
-
-	/* Don't share DAX file data for now. */
-	if (IS_DAX(inode_in) || IS_DAX(inode_out))
-		return -EINVAL;
-
-	/* Are we going all the way to the end? */
-	isize = i_size_read(inode_in);
-	if (isize == 0)
-		return 0;
-	if (len == 0)
-		len = isize - pos_in;
-
-	/* Ensure offsets don't wrap and the input is inside i_size */
-	if (pos_in + len < pos_in || pos_out + len < pos_out ||
-	    pos_in + len > isize)
-		return -EINVAL;
-
-	/* Don't allow dedupe past EOF in the dest file */
-	if (is_dedupe) {
-		loff_t	disize;
-
-		disize = i_size_read(inode_out);
-		if (pos_out >= disize || pos_out + len > disize)
-			return -EINVAL;
-	}
-
-	/* If we're linking to EOF, continue to the block boundary. */
-	if (pos_in + len == isize)
-		blen = ALIGN(isize, bs) - pos_in;
-	else
-		blen = len;
-
-	/* Only reflink if we're aligned to block boundaries */
-	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
-	    !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
-		return -EINVAL;
-
-	/* Don't allow overlapped reflink within the same file */
-	if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
-		return -EINVAL;
-
-	/* Wait for the completion of any pending IOs on srcfile */
-	ret = xfs_file_wait_for_io(inode_in, pos_in, len);
-	if (ret)
-		goto out;
-	ret = xfs_file_wait_for_io(inode_out, pos_out, len);
-	if (ret)
-		goto out;
-
-	if (is_dedupe)
-		flags |= XFS_REFLINK_DEDUPE;
-	ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
-			pos_out, len, flags);
-	if (ret < 0)
-		goto out;
-
-out:
-	return ret;
-}
-
 STATIC ssize_t
 xfs_file_copy_range(
 	struct file	*file_in,
@@ -1086,7 +920,7 @@ xfs_file_copy_range(
 {
 	int		error;
 
-	error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+	error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
 				     len, false);
 	if (error)
 		return error;
@@ -1101,7 +935,7 @@ xfs_file_clone_range(
 	loff_t		pos_out,
 	u64		len)
 {
-	return xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
 				     len, false);
 }
@@ -1124,7 +958,7 @@ xfs_file_dedupe_range(
 	if (len > XFS_MAX_DEDUPE_LEN)
 		len = XFS_MAX_DEDUPE_LEN;
 
-	error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,
+	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
 				     len, true);
 	if (error)
 		return error;
...
@@ -1656,9 +1656,9 @@ void
 xfs_inode_set_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
-	trace_xfs_inode_set_eofblocks_tag(ip);
+	trace_xfs_inode_set_cowblocks_tag(ip);
 	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
-			trace_xfs_perag_set_eofblocks,
+			trace_xfs_perag_set_cowblocks,
 			XFS_ICI_COWBLOCKS_TAG);
 }
@@ -1666,7 +1666,7 @@ void
 xfs_inode_clear_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
-	trace_xfs_inode_clear_eofblocks_tag(ip);
+	trace_xfs_inode_clear_cowblocks_tag(ip);
 	return __xfs_inode_clear_eofblocks_tag(ip,
-			trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG);
+			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
@@ -566,6 +566,17 @@ xfs_file_iomap_begin_delay(
 	xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
 			&got, &prev);
 	if (!eof && got.br_startoff <= offset_fsb) {
+		if (xfs_is_reflink_inode(ip)) {
+			bool		shared;
+
+			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
+					maxbytes_fsb);
+			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
+			error = xfs_reflink_reserve_cow(ip, &got, &shared);
+			if (error)
+				goto out_unlock;
+		}
+
 		trace_xfs_iomap_found(ip, offset, count, 0, &got);
 		goto done;
 	}
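
xfs_file_iomap_begin_delay() now clamps the extent it found to the requested range before reserving CoW blocks, using the xfs_trim_extent() helper declared in the xfs_bmap.h hunk above. A simplified, self-contained sketch of what such a trim has to do (field names follow struct xfs_bmbt_irec; the real helper also has to leave br_startblock alone for holes and delalloc extents, which this sketch ignores):

#include <stdio.h>

/* Simplified extent record: [br_startoff, br_startoff + br_blockcount). */
struct irec {
	unsigned long br_startoff;	/* file offset, in fs blocks */
	unsigned long br_startblock;	/* disk block of first block */
	unsigned long br_blockcount;
};

/* Clamp *irec to the window [bno, bno + len); empty result -> count 0. */
static void trim_extent(struct irec *irec, unsigned long bno,
			unsigned long len)
{
	unsigned long end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;	/* no overlap at all */
		return;
	}
	if (irec->br_startoff < bno) {
		unsigned long d = bno - irec->br_startoff;

		irec->br_startoff += d;
		irec->br_startblock += d;
		irec->br_blockcount -= d;
	}
	if (end < irec->br_startoff + irec->br_blockcount)
		irec->br_blockcount = end - irec->br_startoff;
}

int main(void)
{
	struct irec got = { 100, 5000, 50 };	/* file blocks 100..149 */

	trim_extent(&got, 120, 10);		/* request blocks 120..129 */
	printf("startoff=%lu startblock=%lu count=%lu\n",
	       got.br_startoff, got.br_startblock, got.br_blockcount);
	return 0;
}
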
@@ -961,19 +972,13 @@ xfs_file_iomap_begin(
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_bmbt_irec	imap;
 	xfs_fileoff_t		offset_fsb, end_fsb;
-	bool			shared, trimmed;
 	int			nimaps = 1, error = 0;
+	bool			shared = false, trimmed = false;
 	unsigned		lockmode;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
-		error = xfs_reflink_reserve_cow_range(ip, offset, length);
-		if (error < 0)
-			return error;
-	}
-
 	if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
 		   !xfs_get_extsz_hint(ip)) {
 		/* Reserve delalloc blocks for regular writeback. */
@@ -981,7 +986,16 @@ xfs_file_iomap_begin(
 				iomap);
 	}
 
-	lockmode = xfs_ilock_data_map_shared(ip);
+	/*
+	 * COW writes will allocate delalloc space, so we need to make sure
+	 * to take the lock exclusively here.
+	 */
+	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+		lockmode = XFS_ILOCK_EXCL;
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+	} else {
+		lockmode = xfs_ilock_data_map_shared(ip);
+	}
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
 	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -991,16 +1005,24 @@ xfs_file_iomap_begin(
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
 			       &nimaps, 0);
-	if (error) {
-		xfs_iunlock(ip, lockmode);
-		return error;
-	}
+	if (error)
+		goto out_unlock;
+
+	if (flags & IOMAP_REPORT) {
+		/* Trim the mapping to the nearest shared extent boundary. */
+		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
+				&trimmed);
+		if (error)
+			goto out_unlock;
+	}
 
-	/* Trim the mapping to the nearest shared extent boundary. */
-	error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
-	if (error) {
-		xfs_iunlock(ip, lockmode);
-		return error;
-	}
+	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+		error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+		if (error)
+			goto out_unlock;
+
+		end_fsb = imap.br_startoff + imap.br_blockcount;
+		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
+	}
 
 	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
@@ -1039,6 +1061,9 @@ xfs_file_iomap_begin(
 	if (shared)
 		iomap->flags |= IOMAP_F_SHARED;
 	return 0;
+
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+	return error;
 }
 
 static int
...
@@ -1009,6 +1009,7 @@ xfs_mountfs(
  out_quota:
 	xfs_qm_unmount_quotas(mp);
  out_rtunmount:
+	mp->m_super->s_flags &= ~MS_ACTIVE;
 	xfs_rtunmount_inodes(mp);
  out_rele_rip:
 	IRELE(rip);
...
This diff is collapsed.
@@ -26,8 +26,8 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
 		struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
 
-extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip,
-		xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
+		struct xfs_bmbt_irec *imap, bool *shared);
 extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
 		xfs_off_t offset, xfs_off_t count);
 extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
@@ -43,11 +43,8 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
 		xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-#define XFS_REFLINK_DEDUPE	1	/* only reflink if contents match */
-#define XFS_REFLINK_ALL		(XFS_REFLINK_DEDUPE)
-extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
-		struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
-		unsigned int flags);
+extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+		struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
 extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
 		struct xfs_trans **tpp);
 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
...
@@ -512,13 +512,13 @@ static struct attribute *xfs_error_attrs[] = {
 };
 
-struct kobj_type xfs_error_cfg_ktype = {
+static struct kobj_type xfs_error_cfg_ktype = {
 	.release = xfs_sysfs_release,
 	.sysfs_ops = &xfs_sysfs_ops,
 	.default_attrs = xfs_error_attrs,
 };
 
-struct kobj_type xfs_error_ktype = {
+static struct kobj_type xfs_error_ktype = {
 	.release = xfs_sysfs_release,
 	.sysfs_ops = &xfs_sysfs_ops,
 };
...
@@ -3346,7 +3346,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
 
-DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range);
+DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
 DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
 
 DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
@@ -3356,9 +3356,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
...
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */
 
 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
  */
-#define IOMAP_F_MERGED	0x01	/* contains multiple blocks/extents */
-#define IOMAP_F_SHARED	0x02	/* block shared with another file */
-#define IOMAP_F_NEW	0x04	/* blocks have been newly allocated */
+#define IOMAP_F_NEW	0x01	/* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED	0x10	/* contains multiple blocks/extents */
+#define IOMAP_F_SHARED	0x20	/* block shared with another file */
 
 /*
  * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
 /*
  * Flags for iomap_begin / iomap_end. No flag implies a read.
  */
-#define IOMAP_WRITE		(1 << 0)
-#define IOMAP_ZERO		(1 << 1)
+#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
 
 struct iomap_ops {
 	/*
...
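
With IOMAP_REPORT split out, an ->iomap_begin implementation can tell pure reporting requests like FIEMAP apart from plain reads (no flag at all) and from writes, and only pay for the shared-extent lookup when it is actually asked to report, which is how the xfs_file_iomap_begin() hunk above uses it. A hypothetical, filesystem-agnostic sketch of that dispatch (example_iomap_begin and extent_is_shared are made-up names, not kernel API):

#include <stdio.h>
#include <stdbool.h>

#define IOMAP_WRITE	(1 << 0)
#define IOMAP_ZERO	(1 << 1)
#define IOMAP_REPORT	(1 << 2)

#define IOMAP_F_SHARED	0x20

struct iomap {
	unsigned	flags;
	/* block mapping fields elided */
};

/* Hypothetical begin callback: only report-style requests pay for the
 * shared-extent lookup; plain reads (no flag) take the cheap path. */
static int example_iomap_begin(unsigned flags, struct iomap *iomap,
			       bool extent_is_shared)
{
	if (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
		/* allocate or reserve blocks for the write here */
	} else if (flags & IOMAP_REPORT) {
		/* trim to shared extent boundaries, then report the state */
		if (extent_is_shared)
			iomap->flags |= IOMAP_F_SHARED;
	}
	/* no flag set: a plain read mapping, nothing extra to do */
	return 0;
}

int main(void)
{
	struct iomap m = { 0 };

	example_iomap_begin(IOMAP_REPORT, &m, true);
	printf("IOMAP_F_SHARED set: %d\n", !!(m.flags & IOMAP_F_SHARED));
	return 0;
}
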