Commit 5d51eff4 authored by David Chinner, committed by Lachlan McIlroy

[XFS] Fix inode allocation latency

The log force added in xfs_iget_core() has been a performance issue since
it was introduced for tight loops that allocate and then unlink a single file.
Under heavy writeback, this can introduce unnecessary latency due to the
log I/O getting stuck behind bulk data writes.

Fix this latency problem by avoiding the need for the log force altogether:
move the point at which we mark the linux inode dirty to transaction commit
rather than transaction completion.

This also closes a potential hole in the sync code where a linux inode is
not marked dirty between the time it is modified and the time the log buffer
has been written to disk.
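
For context, the workload described above looks roughly like the sketch
below. This is illustrative only and not part of the patch; the file name,
open flags and iteration count are arbitrary. Each pass allocates an inode
and immediately frees it again, which is the pattern that used to keep
hitting the synchronous log force in xfs_iget_core() and stalling behind
bulk data writeback.

    /*
     * Illustrative sketch (not from the patch): a tight create-then-unlink
     * loop of the kind the commit message refers to.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int i;

            for (i = 0; i < 100000; i++) {
                    /* arbitrary file name on an XFS mount point */
                    int fd = open("testfile", O_CREAT | O_EXCL | O_WRONLY, 0600);

                    if (fd < 0) {
                            perror("open");
                            return 1;
                    }
                    close(fd);
                    unlink("testfile");     /* free the inode straight away */
            }
            return 0;
    }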

SGI-PV: 972753
SGI-Modid: xfs-linux-melb:xfs-kern:30007a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
parent e4143a1c
@@ -70,6 +70,22 @@ xfs_synchronize_atime(
 	}
 }
 
+/*
+ * If the linux inode exists, mark it dirty.
+ * Used when commiting a dirty inode into a transaction so that
+ * the inode will get written back by the linux code
+ */
+void
+xfs_mark_inode_dirty_sync(
+	xfs_inode_t	*ip)
+{
+	bhv_vnode_t	*vp;
+
+	vp = XFS_ITOV_NULL(ip);
+	if (vp)
+		mark_inode_dirty_sync(vn_to_inode(vp));
+}
+
 /*
  * Change the requested timestamp in the given inode.
  * We don't lock across timestamp updates, and we don't log them but
@@ -140,27 +140,9 @@ xfs_iget_core(
 			return ENOENT;
 		}
 
-		/*
-		 * There may be transactions sitting in the
-		 * incore log buffers or being flushed to disk
-		 * at this time. We can't clear the
-		 * XFS_IRECLAIMABLE flag until these
-		 * transactions have hit the disk, otherwise we
-		 * will void the guarantee the flag provides
-		 * xfs_iunpin()
-		 */
-		if (xfs_ipincount(ip)) {
-			read_unlock(&pag->pag_ici_lock);
-			xfs_log_force(mp, 0,
-				XFS_LOG_FORCE|XFS_LOG_SYNC);
-			XFS_STATS_INC(xs_ig_frecycle);
-			goto again;
-		}
-
 		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
 
 		XFS_STATS_INC(xs_ig_found);
 		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
 		read_unlock(&pag->pag_ici_lock);
@@ -2814,40 +2814,8 @@ xfs_iunpin(
 {
 	ASSERT(atomic_read(&ip->i_pincount) > 0);
 
-	if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
-		/*
-		 * If the inode is currently being reclaimed, the link between
-		 * the bhv_vnode and the xfs_inode will be broken after the
-		 * XFS_IRECLAIM* flag is set. Hence, if these flags are not
-		 * set, then we can move forward and mark the linux inode dirty
-		 * knowing that it is still valid as it won't freed until after
-		 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
-		 * i_flags_lock is used to synchronise the setting of the
-		 * XFS_IRECLAIM* flags and the breaking of the link, and so we
-		 * can execute atomically w.r.t to reclaim by holding this lock
-		 * here.
-		 *
-		 * However, we still need to issue the unpin wakeup call as the
-		 * inode reclaim may be blocked waiting for the inode to become
-		 * unpinned.
-		 */
-		if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
-			bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
-			struct inode *inode = NULL;
-
-			BUG_ON(vp == NULL);
-			inode = vn_to_inode(vp);
-			BUG_ON(inode->i_state & I_CLEAR);
-
-			/* make sync come back and flush this inode */
-			if (!(inode->i_state & (I_NEW|I_FREEING)))
-				mark_inode_dirty_sync(inode);
-		}
-		spin_unlock(&ip->i_flags_lock);
+	if (atomic_dec_and_test(&ip->i_pincount))
 		wake_up(&ip->i_ipin_wait);
-	}
 }
 
 /*
@@ -532,6 +532,7 @@ xfs_fsize_t	xfs_file_last_byte(xfs_inode_t *);
 void		xfs_lock_inodes(xfs_inode_t **, int, int, uint);
 
 void		xfs_synchronize_atime(xfs_inode_t *);
+void		xfs_mark_inode_dirty_sync(xfs_inode_t *);
 
 xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
 void		xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t,
@@ -274,6 +274,11 @@ xfs_inode_item_format(
 	 */
 	xfs_synchronize_atime(ip);
 
+	/*
+	 * make sure the linux inode is dirty
+	 */
+	xfs_mark_inode_dirty_sync(ip);
+
 	vecp->i_addr = (xfs_caddr_t)&ip->i_d;
 	vecp->i_len  = sizeof(xfs_dinode_core_t);
 	XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);