Commit b027d4c9 authored by Dave Chinner, committed by Darrick J. Wong

xfs: don't retry xfs_buf_find on XBF_TRYLOCK failure

When looking at an event trace recently, I noticed that non-blocking
buffer lookup attempts would fail on cached locked buffers and then
run the slow cache-miss path. This means we are doing an xfs_buf
allocation, lookup and free unnecessarily every time we avoid
blocking on a locked buffer.

Fix this by changing _xfs_buf_find() to return an error status to
the caller to indicate that we failed the lock attempt rather than
just returning a NULL. This allows the higher level code to
discriminate between a cache miss and a cache hit that we failed to
lock.

This also allows us to return a -EFSCORRUPTED state if we are asked
to look up a block number outside the range of the filesystem in
_xfs_buf_find(), which moves us one step closer to being able to
handle such errors in a more graceful manner at the higher levels.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 8925a3dc
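
The crux of the diff below is an interface change: a pointer return that conflated three different failure modes becomes an int error code, with the buffer handed back through an out-parameter. The following standalone sketch shows the ambiguity the old convention forced on callers and how the new one resolves it. The names here (lookup_old, lookup_new, struct buf) are illustrative stand-ins, not the kernel's; only the shape of the interface change is taken from the patch itself.

#include <errno.h>
#include <stdio.h>

struct buf { int locked; };

static struct buf cached = { .locked = 1 };

/* Old style: one NULL covers miss, trylock failure, and bad address. */
static struct buf *lookup_old(int hit, int trylock)
{
	if (!hit)
		return NULL;		/* cache miss */
	if (trylock && cached.locked)
		return NULL;		/* indistinguishable from a miss */
	return &cached;
}

/* New style: a distinct errno per failure, buffer via out-parameter. */
static int lookup_new(int hit, int trylock, struct buf **found)
{
	*found = NULL;
	if (!hit)
		return -ENOENT;		/* miss: caller may insert */
	if (trylock && cached.locked)
		return -EAGAIN;		/* hit but locked: don't allocate */
	*found = &cached;
	return 0;
}

int main(void)
{
	struct buf *bp;

	/* A trylock failure: the old API looks exactly like a miss... */
	printf("old API returns: %p\n", (void *)lookup_old(1, 1));
	/* ...while the new API names the failure, so no retry is needed. */
	printf("new API returns: %d\n", lookup_new(1, 1, &bp));
	return 0;
}

Under the old shape, xfs_buf_get_map() had to treat every NULL as a cache miss and run the allocate-lookup-free cycle; with a distinct -EAGAIN, a failed trylock returns to the caller immediately.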
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -549,17 +549,31 @@ xfs_buf_hash_destroy(
 }
 
 /*
- * Look up (and insert if absent), a lockable buffer for a given
- * range of an inode.  The buffer is returned locked. No I/O is
- * implied by this call.
+ * Look up a buffer in the buffer cache and return it referenced and locked
+ * in @found_bp.
+ *
+ * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
+ * cache.
+ *
+ * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
+ * -EAGAIN if we fail to lock it.
+ *
+ * Return values are:
+ *	-EFSCORRUPTED if have been supplied with an invalid address
+ *	-EAGAIN on trylock failure
+ *	-ENOENT if we fail to find a match and @new_bp was NULL
+ *	0, with @found_bp:
+ *		- @new_bp if we inserted it into the cache
+ *		- the buffer we found and locked.
  */
-static struct xfs_buf *
-_xfs_buf_find(
+static int
+xfs_buf_find(
 	struct xfs_buftarg	*btp,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	struct xfs_buf		*new_bp)
+	struct xfs_buf		*new_bp,
+	struct xfs_buf		**found_bp)
 {
 	struct xfs_perag	*pag;
 	xfs_buf_t		*bp;
@@ -567,6 +581,8 @@ _xfs_buf_find(
 	xfs_daddr_t		eofs;
 	int			i;
 
+	*found_bp = NULL;
+
 	for (i = 0; i < nmaps; i++)
 		cmap.bm_len += map[i].bm_len;
 
@@ -580,16 +596,11 @@ _xfs_buf_find(
 	 */
 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
 	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
-		/*
-		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
-		 * but none of the higher level infrastructure supports
-		 * returning a specific error on buffer lookup failures.
-		 */
 		xfs_alert(btp->bt_mount,
 			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
 			  __func__, cmap.bm_bn, eofs);
 		WARN_ON(1);
-		return NULL;
+		return -EFSCORRUPTED;
 	}
 
 	pag = xfs_perag_get(btp->bt_mount,
@@ -604,19 +615,20 @@ _xfs_buf_find(
 	}
 
 	/* No match found */
-	if (new_bp) {
-		/* the buffer keeps the perag reference until it is freed */
-		new_bp->b_pag = pag;
-		rhashtable_insert_fast(&pag->pag_buf_hash,
-				       &new_bp->b_rhash_head,
-				       xfs_buf_hash_params);
-		spin_unlock(&pag->pag_buf_lock);
-	} else {
+	if (!new_bp) {
 		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 		spin_unlock(&pag->pag_buf_lock);
 		xfs_perag_put(pag);
+		return -ENOENT;
 	}
-	return new_bp;
+
+	/* the buffer keeps the perag reference until it is freed */
+	new_bp->b_pag = pag;
+	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
+			       xfs_buf_hash_params);
+	spin_unlock(&pag->pag_buf_lock);
+	*found_bp = new_bp;
+	return 0;
 
 found:
 	spin_unlock(&pag->pag_buf_lock);
@@ -626,7 +638,7 @@ _xfs_buf_find(
 		if (flags & XBF_TRYLOCK) {
 			xfs_buf_rele(bp);
 			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
-			return NULL;
+			return -EAGAIN;
 		}
 		xfs_buf_lock(bp);
 		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
@@ -646,7 +658,8 @@ _xfs_buf_find(
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
 	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
-	return bp;
+	*found_bp = bp;
+	return 0;
 }
 
 struct xfs_buf *
@@ -656,8 +669,14 @@ xfs_buf_incore(
 	size_t			numblks,
 	xfs_buf_flags_t		flags)
 {
+	struct xfs_buf		*bp;
+	int			error;
 	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-	return _xfs_buf_find(target, &map, 1, flags, NULL);
+
+	error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
+	if (error)
+		return NULL;
+	return bp;
 }
 
 /*
@@ -676,9 +695,27 @@ xfs_buf_get_map(
 	struct xfs_buf		*new_bp;
 	int			error = 0;
 
-	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
-	if (likely(bp))
+	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
+
+	switch (error) {
+	case 0:
+		/* cache hit */
 		goto found;
+	case -EAGAIN:
+		/* cache hit, trylock failure, caller handles failure */
+		ASSERT(flags & XBF_TRYLOCK);
+		return NULL;
+	case -ENOENT:
+		/* cache miss, go for insert */
+		break;
+	case -EFSCORRUPTED:
+	default:
+		/*
+		 * None of the higher layers understand failure types
+		 * yet, so return NULL to signal a fatal lookup error.
+		 */
+		return NULL;
+	}
 
 	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
 	if (unlikely(!new_bp))
@@ -690,8 +727,8 @@ xfs_buf_get_map(
 		return NULL;
 	}
 
-	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
-	if (!bp) {
+	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
+	if (error) {
 		xfs_buf_free(new_bp);
 		return NULL;
 	}
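
As a worked example of the out-of-range check that now returns -EFSCORRUPTED: XFS_FSB_TO_BB converts filesystem blocks into 512-byte basic blocks, so eofs is the device size in sectors. The snippet below redoes that arithmetic outside the kernel; the shift by sb_blocklog - BBSHIFT matches how the conversion is defined, but the concrete block counts are invented for illustration.

#include <stdio.h>

#define BBSHIFT	9	/* 512-byte basic blocks, as in XFS */

int main(void)
{
	/* Hypothetical filesystem: 4096-byte blocks, 1000 blocks long. */
	int sb_blocklog = 12;
	unsigned long long sb_dblocks = 1000;

	/* One 4096-byte FSB is 1 << (12 - 9) = 8 basic blocks. */
	unsigned long long eofs = sb_dblocks << (sb_blocklog - BBSHIFT);

	/*
	 * Any daddr at or beyond eofs (8000 here) is off the end of the
	 * device; xfs_buf_find() now fails it with -EFSCORRUPTED instead
	 * of a bare NULL.
	 */
	printf("eofs = %llu basic blocks\n", eofs);
	return 0;
}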