Commit 8a56d7c3 authored by Dave Chinner

Merge branch 'xfs-io-fixes' into for-next

parents 316433be 0a50f162
@@ -172,6 +172,12 @@ xfs_setfilesize_ioend(
         current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
         __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
+        /* we abort the update if there was an IO error */
+        if (ioend->io_error) {
+                xfs_trans_cancel(tp);
+                return ioend->io_error;
+        }
         return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 }
@@ -212,14 +218,17 @@ xfs_end_io(
                 ioend->io_error = -EIO;
                 goto done;
         }
-        if (ioend->io_error)
-                goto done;
         /*
          * For unwritten extents we need to issue transactions to convert a
          * range to normal written extens after the data I/O has finished.
+         * Detecting and handling completion IO errors is done individually
+         * for each case as different cleanup operations need to be performed
+         * on error.
          */
         if (ioend->io_type == XFS_IO_UNWRITTEN) {
+                if (ioend->io_error)
+                        goto done;
                 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
                                                   ioend->io_size);
         } else if (ioend->io_append_trans) {
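
The two hunks above move I/O-error handling into the individual completion cases, so a pending file-size-update transaction is cancelled and the error is returned instead of the update being silently skipped. Below is a minimal userspace sketch of that cancel-on-error shape; the txn/ioend types and helpers are hypothetical stand-ins, not the XFS structures or API.

```c
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for a transaction handle and a completed ioend. */
struct txn   { int reserved; };
struct ioend { int io_error; long io_offset; long io_size; };

static void txn_cancel(struct txn *tp) { tp->reserved = 0; }

static int txn_commit_size_update(struct txn *tp, long offset, long size)
{
        printf("commit new file size %ld\n", offset + size);
        tp->reserved = 0;
        return 0;
}

/* Same shape as the patched completion: abort the update on I/O error. */
static int setfilesize_ioend(struct txn *tp, struct ioend *ioend)
{
        if (ioend->io_error) {
                txn_cancel(tp);                 /* drop the reservation */
                return ioend->io_error;         /* and surface the error */
        }
        return txn_commit_size_update(tp, ioend->io_offset, ioend->io_size);
}

int main(void)
{
        struct txn   tp    = { .reserved = 1 };
        struct ioend ioend = { .io_error = -EIO, .io_offset = 0, .io_size = 4096 };

        printf("completion returned %d\n", setfilesize_ioend(&tp, &ioend));
        return 0;
}
```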
@@ -1399,12 +1408,12 @@ __xfs_get_blocks(
               imap.br_startblock == DELAYSTARTBLOCK))) {
                 if (direct || xfs_get_extsz_hint(ip)) {
                         /*
-                         * Drop the ilock in preparation for starting the block
-                         * allocation transaction. It will be retaken
-                         * exclusively inside xfs_iomap_write_direct for the
-                         * actual allocation.
+                         * xfs_iomap_write_direct() expects the shared lock. It
+                         * is unlocked on return.
                          */
-                        xfs_iunlock(ip, lockmode);
+                        if (lockmode == XFS_ILOCK_EXCL)
+                                xfs_ilock_demote(ip, lockmode);
                         error = xfs_iomap_write_direct(ip, offset, size,
                                                        &imap, nimaps);
                         if (error)
...
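
In the __xfs_get_blocks() hunk the exclusive ilock is now demoted to shared rather than dropped, because xfs_iomap_write_direct() is entered with the shared lock held and releases it itself. Below is a rough pthread sketch of that "callee takes ownership of the caller's shared lock" contract; the names are made up, and a POSIX rwlock cannot express the exclusive-to-shared demotion itself.

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ilock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Contract mirrored from the patch: the caller enters with the shared
 * lock held; this function owns it from here on and drops it before
 * doing work that must not run under the lock.
 */
static int write_direct(long offset, long size)
{
        /* ... lookups that only need shared access would go here ... */
        pthread_rwlock_unlock(&ilock);          /* release the caller's lock */

        /* ... heavyweight allocation work runs unlocked ... */
        printf("allocate %ld bytes at offset %ld\n", size, offset);
        return 0;
}

int main(void)
{
        pthread_rwlock_rdlock(&ilock);          /* caller takes the shared lock */
        int error = write_direct(0, 4096);

        /* No unlock here: ownership was handed to write_direct(). */
        return error;
}
```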
@@ -482,6 +482,8 @@ xfs_zero_eof(
         ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
         ASSERT(offset > isize);
+        trace_xfs_zero_eof(ip, isize, offset - isize);
         /*
          * First handle zeroing the block on which isize resides.
          *
@@ -574,6 +576,7 @@ xfs_file_aio_write_checks(
         struct xfs_inode        *ip = XFS_I(inode);
         ssize_t                 error = 0;
         size_t                  count = iov_iter_count(from);
+        bool                    drained_dio = false;
 restart:
         error = generic_write_checks(iocb, from);
@@ -611,12 +614,13 @@ xfs_file_aio_write_checks(
                 bool    zero = false;
                 spin_unlock(&ip->i_flags_lock);
-                if (*iolock == XFS_IOLOCK_SHARED) {
-                        xfs_rw_iunlock(ip, *iolock);
-                        *iolock = XFS_IOLOCK_EXCL;
-                        xfs_rw_ilock(ip, *iolock);
-                        iov_iter_reexpand(from, count);
+                if (!drained_dio) {
+                        if (*iolock == XFS_IOLOCK_SHARED) {
+                                xfs_rw_iunlock(ip, *iolock);
+                                *iolock = XFS_IOLOCK_EXCL;
+                                xfs_rw_ilock(ip, *iolock);
+                                iov_iter_reexpand(from, count);
+                        }
                         /*
                          * We now have an IO submission barrier in place, but
                          * AIO can do EOF updates during IO completion and hence
@@ -626,6 +630,7 @@ xfs_file_aio_write_checks(
                          * no-op.
                          */
                         inode_dio_wait(inode);
+                        drained_dio = true;
                         goto restart;
                 }
                 error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
...
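
The xfs_file_aio_write_checks() hunks change EOF-extending writes to drain in-flight direct I/O exactly once and then redo the checks, instead of only draining when the iolock had to be upgraded. Below is a small single-threaded sketch of that "expensive drain at most once, then restart" control flow, with hypothetical helpers standing in for the kernel primitives.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical state standing in for the inode and its in-flight AIO. */
static long in_core_size = 4096;        /* EOF as currently visible */

static void inode_dio_wait(void)
{
        /* Pretend a pending AIO completion moved EOF while we waited. */
        in_core_size = 8192;
}

static int write_checks(long pos)
{
        bool drained_dio = false;

restart:
        if (pos > in_core_size) {
                if (!drained_dio) {
                        /*
                         * A completion may still be about to update EOF, so
                         * drain once, remember that we did, and redo the
                         * checks against the possibly-updated size.
                         */
                        inode_dio_wait();
                        drained_dio = true;
                        goto restart;
                }
                printf("zeroing from %ld to %ld before the write\n",
                       in_core_size, pos);
        }
        return 0;
}

int main(void)
{
        return write_checks(6000);      /* no zeroing needed: EOF moved to 8192 */
}
```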
@@ -131,20 +131,29 @@ xfs_iomap_write_direct(
         uint            qblocks, resblks, resrtextents;
         int             committed;
         int             error;
+        int             lockmode;
-        error = xfs_qm_dqattach(ip, 0);
-        if (error)
-                return error;
         rt = XFS_IS_REALTIME_INODE(ip);
         extsz = xfs_get_extsz_hint(ip);
+        lockmode = XFS_ILOCK_SHARED;    /* locked by caller */
+        ASSERT(xfs_isilocked(ip, lockmode));
         offset_fsb = XFS_B_TO_FSBT(mp, offset);
         last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
         if ((offset + count) > XFS_ISIZE(ip)) {
+                /*
+                 * Assert that the in-core extent list is present since this can
+                 * call xfs_iread_extents() and we only have the ilock shared.
+                 * This should be safe because the lock was held around a bmapi
+                 * call in the caller and we only need it to access the in-core
+                 * list.
+                 */
+                ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
+                                                        XFS_IFEXTENTS);
                 error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
                 if (error)
-                        return error;
+                        goto out_unlock;
         } else {
                 if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
                         last_fsb = MIN(last_fsb, (xfs_fileoff_t)
@@ -173,6 +182,15 @@ xfs_iomap_write_direct(
                 quota_flag = XFS_QMOPT_RES_REGBLKS;
         }
+        /*
+         * Drop the shared lock acquired by the caller, attach the dquot if
+         * necessary and move on to transaction setup.
+         */
+        xfs_iunlock(ip, lockmode);
+        error = xfs_qm_dqattach(ip, 0);
+        if (error)
+                return error;
         /*
          * Allocate and setup the transaction
          */
@@ -187,7 +205,8 @@ xfs_iomap_write_direct(
                 return error;
         }
-        xfs_ilock(ip, XFS_ILOCK_EXCL);
+        lockmode = XFS_ILOCK_EXCL;
+        xfs_ilock(ip, lockmode);
         error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
         if (error)
@@ -229,7 +248,7 @@ xfs_iomap_write_direct(
                 error = xfs_alert_fsblock_zero(ip, imap);
 out_unlock:
-        xfs_iunlock(ip, XFS_ILOCK_EXCL);
+        xfs_iunlock(ip, lockmode);
         return error;
 out_bmap_cancel:
...
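
The xfs_iomap_write_direct() hunks make the function enter with the caller's shared ilock, drop it before setup work that may block (dquot attach, transaction allocation), retake it exclusively for the actual allocation, and release whichever mode is held through the single out_unlock path. Below is an illustrative userspace sketch of that locking shape using a pthread rwlock; it is not the kernel API, only the control flow.

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ilock = PTHREAD_RWLOCK_INITIALIZER;

static int iomap_write_direct(long offset, long count)
{
        int shared = 1;                 /* entered with the caller's shared lock */
        int error = 0;

        /* Read-only sizing/alignment work is safe under the shared lock. */
        if (offset < 0 || count <= 0) {
                error = -EINVAL;
                goto out_unlock;        /* still holding the shared lock here */
        }

        /* Drop the shared lock before setup work that may block. */
        pthread_rwlock_unlock(&ilock);
        /* ... quota attach, transaction allocation, reservation ... */

        /* Retake exclusively for the actual allocation/update. */
        pthread_rwlock_wrlock(&ilock);
        shared = 0;

        printf("allocating %ld bytes at %ld (%s lock held)\n",
               count, offset, shared ? "shared" : "exclusive");

out_unlock:
        pthread_rwlock_unlock(&ilock);  /* releases whichever mode is held */
        return error;
}

int main(void)
{
        pthread_rwlock_rdlock(&ilock);  /* caller provides the shared lock */
        return iomap_write_direct(0, 65536);
}
```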
@@ -181,6 +181,11 @@ xfs_fs_map_blocks(
         ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
         if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
+                /*
+                 * xfs_iomap_write_direct() expects to take ownership of
+                 * the shared ilock.
+                 */
+                xfs_ilock(ip, XFS_ILOCK_SHARED);
                 error = xfs_iomap_write_direct(ip, offset, length,
                                                &imap, nimaps);
                 if (error)
...
@@ -1312,6 +1312,7 @@ DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
 DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
+DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
 DECLARE_EVENT_CLASS(xfs_itrunc_class,
         TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
...