Commit 7271d243 authored by Dave Chinner, committed by Alex Elder

xfs: don't serialise adjacent concurrent direct IO appending writes

For append write workloads, extending the file requires a certain
amount of exclusive locking to be done up front to ensure correctness
in things like zeroing any allocated regions between the old EOF and
the start of the new IO.

For single threads this typically isn't a problem, and for large
IOs we don't serialise enough for it to be a problem for two
threads on really fast block devices. However, for smaller IOs and
larger thread counts we have a problem.

Take 4 concurrent, sequential, single-block-sized and block-aligned IOs.
After the first IO is submitted but before it completes, we end up
with this state:

        IO 1    IO 2    IO 3    IO 4
      +-------+-------+-------+-------+
      ^       ^
      |       |
      |       |
      |       |
      |       \- ip->i_new_size
      \- ip->i_size

And the IO is done without exclusive locking because offset <=
ip->i_size. When we submit IO 2, we see offset > ip->i_size, and
grab the IO lock exclusive, because there is a chance we need to do
EOF zeroing. However, there is already an IO in progress that avoids
the need for EOF zeroing because offset <= ip->i_new_size, hence we
could avoid holding the IO lock exclusive for this IO (the check is
sketched in code after the diagram below). After submission of the
second IO, we'd end up in this state:

        IO 1    IO 2    IO 3    IO 4
      +-------+-------+-------+-------+
      ^               ^
      |               |
      |               |
      |               |
      |               \- ip->i_new_size
      \- ip->i_size
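
In code, the decision described above (the same condition the patch
introduces in xfs_file_aio_write_checks() below) boils down to
comparing the write offset against ip->i_new_size when an append is
already in flight, and against ip->i_size otherwise. A minimal
userspace sketch, with illustrative names rather than the kernel's
types:

#include <stdbool.h>
#include <stdint.h>

typedef int64_t xfs_fsize_t;

/*
 * Sketch only: EOF zeroing (and hence the exclusive iolock) is needed
 * only when this write starts beyond both the current EOF and the end
 * of any in-flight appending write.
 */
static bool needs_eof_zeroing(int64_t pos, xfs_fsize_t i_size,
			      xfs_fsize_t i_new_size)
{
	if (i_new_size)			/* another append is already in flight */
		return pos > i_new_size;
	return pos > i_size;
}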

There is no need to grab the i_mutex or the IO lock in exclusive
mode if we don't need to invalidate the page cache. Taking these
locks on every direct IO effectively serialises them, as taking the
IO lock in exclusive mode has to wait for all shared holders to drop
the lock. That only happens when IO is complete, so effectively it
prevents dispatch of concurrent direct IO writes to the same inode.
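
If it helps to see why shared holders block an exclusive taker, here
is a rough userspace analogy using POSIX rwlocks; this is purely
illustrative, not the kernel's locking code:

#include <pthread.h>

static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;

static void dispatch_dio_shared(void)
{
	pthread_rwlock_rdlock(&iolock);	/* held until the IO completes */
	/* ... submit the direct IO ... */
}

static void dio_complete(void)
{
	pthread_rwlock_unlock(&iolock);	/* shared hold dropped at completion */
}

static void dispatch_dio_exclusive(void)
{
	/* Blocks until every shared holder has dropped the lock, i.e.
	 * until all in-flight IO completes; no new IO dispatches behind us. */
	pthread_rwlock_wrlock(&iolock);
	/* ... invalidate page cache, zero EOF, submit ... */
	pthread_rwlock_unlock(&iolock);
}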

And so you can see that for the third concurrent IO, we'd avoid
exclusive locking for the same reason we avoided the exclusive lock
for the second IO.

Fixing this is a bit more complex than that, because we need to hold
a write-submission-local value of ip->i_new_size so that clearing
the value is only done if no other thread has updated it before our
IO completes.
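
The shape of that fix, as implemented in xfs_aio_write_newsize_update()
in the diff below, is a double-checked compare-and-clear. A minimal
userspace sketch, with a pthread mutex standing in for the XFS ilock
and hypothetical type names:

#include <pthread.h>
#include <stdint.h>

typedef int64_t xfs_fsize_t;

struct inode_state {
	pthread_mutex_t	lock;		/* stands in for the XFS ilock */
	xfs_fsize_t	i_new_size;	/* highest in-flight append end */
};

/*
 * Each writer remembers the value it published (new_size) and clears
 * the shared field only if no later writer has raised it since; the
 * comparison is repeated under the lock to close the race window.
 */
static void newsize_update(struct inode_state *ip, xfs_fsize_t new_size)
{
	if (new_size == ip->i_new_size) {	/* unlocked fast-path check */
		pthread_mutex_lock(&ip->lock);
		if (new_size == ip->i_new_size)	/* re-check under the lock */
			ip->i_new_size = 0;
		pthread_mutex_unlock(&ip->lock);
	}
}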
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent 0c38a251
@@ -418,11 +418,13 @@ xfs_aio_write_isize_update(
  */
 STATIC void
 xfs_aio_write_newsize_update(
-	struct xfs_inode	*ip)
+	struct xfs_inode	*ip,
+	xfs_fsize_t		new_size)
 {
-	if (ip->i_new_size) {
+	if (new_size == ip->i_new_size) {
 		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
+		if (new_size == ip->i_new_size)
+			ip->i_new_size = 0;
 		if (ip->i_d.di_size > ip->i_size)
 			ip->i_d.di_size = ip->i_size;
 		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
@@ -473,7 +475,7 @@ xfs_file_splice_write(
 	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
 
 	xfs_aio_write_isize_update(inode, ppos, ret);
-	xfs_aio_write_newsize_update(ip);
+	xfs_aio_write_newsize_update(ip, new_size);
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 	return ret;
 }
@@ -670,6 +672,7 @@ xfs_file_aio_write_checks(
 	struct file		*file,
 	loff_t			*pos,
 	size_t			*count,
+	xfs_fsize_t		*new_sizep,
 	int			*iolock)
 {
 	struct inode		*inode = file->f_mapping->host;
@@ -677,6 +680,8 @@ xfs_file_aio_write_checks(
 	xfs_fsize_t		new_size;
 	int			error = 0;
 
+	*new_sizep = 0;
+restart:
 	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
 	if (error) {
 		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
@@ -684,20 +689,41 @@ xfs_file_aio_write_checks(
 		return error;
 	}
 
-	new_size = *pos + *count;
-	if (new_size > ip->i_size)
-		ip->i_new_size = new_size;
-
 	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
 		file_update_time(file);
 
 	/*
 	 * If the offset is beyond the size of the file, we need to zero any
 	 * blocks that fall between the existing EOF and the start of this
-	 * write.
+	 * write. There is no need to issue zeroing if another in-flight IO
+	 * ends at or before this one. If zeroing is needed and we are
+	 * currently holding the iolock shared, we need to update it to
+	 * exclusive which involves dropping all locks and relocking to
+	 * maintain correct locking order. If we do this, restart the
+	 * function to ensure all checks and values are still valid.
 	 */
-	if (*pos > ip->i_size)
+	if ((ip->i_new_size && *pos > ip->i_new_size) ||
+	    (!ip->i_new_size && *pos > ip->i_size)) {
+		if (*iolock == XFS_IOLOCK_SHARED) {
+			xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
+			*iolock = XFS_IOLOCK_EXCL;
+			xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+			goto restart;
+		}
 		error = -xfs_zero_eof(ip, *pos, ip->i_size);
+	}
+
+	/*
+	 * If this IO extends beyond EOF, we may need to update ip->i_new_size.
+	 * We have already zeroed space beyond EOF (if necessary). Only update
+	 * ip->i_new_size if this IO ends beyond any other in-flight writes.
+	 */
+	new_size = *pos + *count;
+	if (new_size > ip->i_size) {
+		if (new_size > ip->i_new_size)
+			ip->i_new_size = new_size;
+		*new_sizep = new_size;
+	}
 
 	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
 	if (error)
@@ -744,6 +770,7 @@ xfs_file_dio_aio_write(
 	unsigned long		nr_segs,
 	loff_t			pos,
 	size_t			ocount,
+	xfs_fsize_t		*new_size,
 	int			*iolock)
 {
 	struct file		*file = iocb->ki_filp;
@@ -764,13 +791,20 @@ xfs_file_dio_aio_write(
 	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
 		unaligned_io = 1;
 
-	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
+	/*
+	 * We don't need to take an exclusive lock unless the page cache needs
+	 * to be invalidated or unaligned IO is being executed. We don't need
+	 * to consider the EOF extension case here because
+	 * xfs_file_aio_write_checks() will relock the inode as necessary for
+	 * EOF zeroing cases and fill out the new inode size as appropriate.
+	 */
+	if (unaligned_io || mapping->nrpages)
 		*iolock = XFS_IOLOCK_EXCL;
 	else
 		*iolock = XFS_IOLOCK_SHARED;
 	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
 
-	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
 	if (ret)
 		return ret;
@@ -809,6 +843,7 @@ xfs_file_buffered_aio_write(
 	unsigned long		nr_segs,
 	loff_t			pos,
 	size_t			ocount,
+	xfs_fsize_t		*new_size,
 	int			*iolock)
 {
 	struct file		*file = iocb->ki_filp;
@@ -822,7 +857,7 @@ xfs_file_buffered_aio_write(
 	*iolock = XFS_IOLOCK_EXCL;
 	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
 
-	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
 	if (ret)
 		return ret;
@@ -862,6 +897,7 @@ xfs_file_aio_write(
 	ssize_t			ret;
 	int			iolock;
 	size_t			ocount = 0;
+	xfs_fsize_t		new_size = 0;
 
 	XFS_STATS_INC(xs_write_calls);
if (unlikely(file->f_flags & O_DIRECT)) if (unlikely(file->f_flags & O_DIRECT))
ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
ocount, &iolock); ocount, &new_size, &iolock);
else else
ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
ocount, &iolock); ocount, &new_size, &iolock);
xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);
@@ -905,7 +941,7 @@ xfs_file_aio_write(
 	}
 
 out_unlock:
-	xfs_aio_write_newsize_update(ip);
+	xfs_aio_write_newsize_update(ip, new_size);
 	xfs_rw_iunlock(ip, iolock);
 	return ret;
 }