Commit ee138217 authored by Chandan Babu R

Merge tag 'btree-remove-btnum-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.9-mergeC

xfs: remove bc_btnum from btree cursors

From Christoph Hellwig,

This series continues the migration of btree geometry information out of
the cursor structure and into the ops structure.  This time around, we
replace the btree type enumeration (btnum) with an explicit name string
in the btree ops structure.  This enables easy creation of /any/ new
btree type without having to mess with libxfs.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'btree-remove-btnum-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: remove xfs_btnum_t
  xfs: pass a 'bool is_finobt' to xfs_inobt_insert
  xfs: split xfs_inobt_init_cursor
  xfs: split xfs_inobt_insert_sprec
  xfs: remove the which variable in xchk_iallocbt
  xfs: remove the btnum argument to xfs_inobt_count_blocks
  xfs: remove xfs_inobt_cur
  xfs: split xfs_allocbt_init_cursor
  xfs: refactor the btree cursor allocation logic in xchk_ag_btcur_init
  xfs: add a sick_mask to struct xfs_btree_ops
  xfs: add a name field to struct xfs_btree_ops
  xfs: split the agf_roots and agf_levels arrays
  xfs: remove xfs_bmbt_stage_cursor
  xfs: fold xfs_bmbt_init_common into xfs_bmbt_init_cursor
  xfs: make staging file forks explicit
  xfs: make full use of xfs_btree_stage_ifakeroot in xfs_bmbt_stage_cursor
  xfs: remove xfs_rmapbt_stage_cursor
  xfs: fold xfs_rmapbt_init_common into xfs_rmapbt_init_cursor
  xfs: remove xfs_refcountbt_stage_cursor
  xfs: fold xfs_refcountbt_init_common into xfs_refcountbt_init_cursor
  xfs: remove xfs_inobt_stage_cursor
  xfs: fold xfs_inobt_init_common into xfs_inobt_init_cursor
  xfs: remove xfs_allocbt_stage_cursor
  xfs: fold xfs_allocbt_init_common into xfs_allocbt_init_cursor
  xfs: don't override bc_ops for staging btrees
  xfs: add a xfs_btree_init_ptr_from_cur
  xfs: move comment about 2 keys per pointer in the rmap btree
parents 681cb87b ec793e69
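
The core idea of the series, sketched below in simplified C: rather than tagging every cursor with an xfs_btnum_t value, each btree supplies a static xfs_btree_ops structure carrying its name and callbacks, and identifying a btree becomes a pointer comparison against the known ops. The structures here are illustrative stand-ins trimmed down from the kernel definitions, not the complete types.

/* Illustrative sketch only; heavily trimmed from the kernel types. */
struct xfs_btree_ops {
	const char	*name;		/* "bno", "cnt", "rmap", ... */
	/* geometry fields and operation callbacks elided */
};

struct xfs_btree_cur {
	const struct xfs_btree_ops	*bc_ops;
	/* no bc_btnum; the ops pointer identifies the btree type */
};

/* Identity checks become pointer comparisons, as in xfs_shared.h below: */
static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_bnobt_ops;
}

This is also why the series can claim that creating a new btree type no longer requires touching libxfs: there is no central enum left to extend.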
......@@ -669,14 +669,13 @@ xfs_agfblock_init(
agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
agf->agf_seqno = cpu_to_be32(id->agno);
agf->agf_length = cpu_to_be32(id->agsize);
agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
agf->agf_bno_root = cpu_to_be32(XFS_BNO_BLOCK(mp));
agf->agf_cnt_root = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_bno_level = cpu_to_be32(1);
agf->agf_cnt_level = cpu_to_be32(1);
if (xfs_has_rmapbt(mp)) {
agf->agf_roots[XFS_BTNUM_RMAPi] =
cpu_to_be32(XFS_RMAP_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
agf->agf_rmap_root = cpu_to_be32(XFS_RMAP_BLOCK(mp));
agf->agf_rmap_level = cpu_to_be32(1);
agf->agf_rmap_blocks = cpu_to_be32(1);
}
......
......@@ -36,8 +36,9 @@ struct xfs_perag {
atomic_t pag_active_ref; /* active reference count */
wait_queue_head_t pag_active_wq;/* woken active_ref falls to zero */
unsigned long pag_opstate;
uint8_t pagf_levels[XFS_BTNUM_AGF];
/* # of levels in bno & cnt btree */
uint8_t pagf_bno_level; /* # of levels in bno btree */
uint8_t pagf_cnt_level; /* # of levels in cnt btree */
uint8_t pagf_rmap_level;/* # of levels in rmap btree */
uint32_t pagf_flcount; /* count of blocks in freelist */
xfs_extlen_t pagf_freeblks; /* total free blocks */
xfs_extlen_t pagf_longest; /* longest free space */
......@@ -86,7 +87,8 @@ struct xfs_perag {
* Alternate btree heights so that online repair won't trip the write
* verifiers while rebuilding the AG btrees.
*/
uint8_t pagf_repair_levels[XFS_BTNUM_AGF];
uint8_t pagf_repair_bno_level;
uint8_t pagf_repair_cnt_level;
uint8_t pagf_repair_refcount_level;
#endif
......
......@@ -273,9 +273,8 @@ xfs_alloc_complain_bad_rec(
struct xfs_mount *mp = cur->bc_mp;
xfs_warn(mp,
"%s Freespace BTree record corruption in AG %d detected at %pS!",
cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
cur->bc_ag.pag->pag_agno, fa);
"%sbt record corruption in AG %d detected at %pS!",
cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
xfs_warn(mp,
"start block 0x%x block count 0x%x", irec->ar_startblock,
irec->ar_blockcount);
......@@ -863,8 +862,8 @@ xfs_alloc_cur_setup(
* attempt a small allocation.
*/
if (!acur->cnt)
acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
args->agbp, args->pag, XFS_BTNUM_CNT);
acur->cnt = xfs_cntbt_init_cursor(args->mp, args->tp,
args->agbp, args->pag);
error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
if (error)
return error;
......@@ -873,11 +872,11 @@ xfs_alloc_cur_setup(
* Allocate the bnobt left and right search cursors.
*/
if (!acur->bnolt)
acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
args->agbp, args->pag, XFS_BTNUM_BNO);
acur->bnolt = xfs_bnobt_init_cursor(args->mp, args->tp,
args->agbp, args->pag);
if (!acur->bnogt)
acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
args->agbp, args->pag, XFS_BTNUM_BNO);
acur->bnogt = xfs_bnobt_init_cursor(args->mp, args->tp,
args->agbp, args->pag);
return i == 1 ? 0 : -ENOSPC;
}
......@@ -919,7 +918,7 @@ xfs_alloc_cur_check(
bool busy;
unsigned busy_gen = 0;
bool deactivate = false;
bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
bool isbnobt = xfs_btree_is_bno(cur->bc_ops);
*new = 0;
......@@ -996,8 +995,7 @@ xfs_alloc_cur_check(
out:
if (deactivate)
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
*new);
trace_xfs_alloc_cur_check(cur, bno, len, diff, *new);
return 0;
}
......@@ -1236,8 +1234,8 @@ xfs_alloc_ag_vextent_exact(
/*
* Allocate/initialize a cursor for the by-number freespace btree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag, XFS_BTNUM_BNO);
bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
args->pag);
/*
* Lookup bno and minlen in the btree (minlen is irrelevant, really).
......@@ -1297,8 +1295,8 @@ xfs_alloc_ag_vextent_exact(
* We are allocating agbno for args->len
* Allocate/initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag, XFS_BTNUM_CNT);
cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag);
ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
......@@ -1712,8 +1710,8 @@ xfs_alloc_ag_vextent_size(
/*
* Allocate and initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag, XFS_BTNUM_CNT);
cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag);
bno_cur = NULL;
/*
......@@ -1898,8 +1896,8 @@ xfs_alloc_ag_vextent_size(
/*
* Allocate and initialize a cursor for the by-block tree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->pag, XFS_BTNUM_BNO);
bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
args->pag);
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
rbno, rlen, XFSA_FIXUP_CNT_OK)))
goto error0;
......@@ -1973,7 +1971,7 @@ xfs_free_ag_extent(
/*
* Allocate and initialize a cursor for the by-block btree.
*/
bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
bno_cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
/*
* Look for a neighboring block on the left (lower block numbers)
* that is contiguous with this space.
......@@ -2047,7 +2045,7 @@ xfs_free_ag_extent(
/*
* Now allocate and initialize a cursor for the by-size tree.
*/
cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
cnt_cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
/*
* Have both left and right contiguous neighbors.
* Merge all three into a single free block.
......@@ -2335,8 +2333,9 @@ xfs_alloc_min_freelist(
struct xfs_perag *pag)
{
/* AG btrees have at least 1 level. */
static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
const unsigned int bno_level = pag ? pag->pagf_bno_level : 1;
const unsigned int cnt_level = pag ? pag->pagf_cnt_level : 1;
const unsigned int rmap_level = pag ? pag->pagf_rmap_level : 1;
unsigned int min_free;
ASSERT(mp->m_alloc_maxlevels > 0);
......@@ -2363,16 +2362,12 @@ xfs_alloc_min_freelist(
*/
/* space needed by-bno freespace btree */
min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
mp->m_alloc_maxlevels) * 2 - 2;
min_free = min(bno_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
/* space needed by-size freespace btree */
min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
mp->m_alloc_maxlevels) * 2 - 2;
min_free += min(cnt_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
/* space needed reverse mapping used space btree */
if (xfs_has_rmapbt(mp))
min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
mp->m_rmap_maxlevels) * 2 - 2;
min_free += min(rmap_level + 1, mp->m_rmap_maxlevels) * 2 - 2;
return min_free;
}
......@@ -2759,8 +2754,8 @@ xfs_exact_minlen_extent_available(
xfs_extlen_t flen;
int error = 0;
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
args->pag, XFS_BTNUM_CNT);
cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, agbp,
args->pag);
error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
if (error)
goto out;
......@@ -3056,8 +3051,8 @@ xfs_alloc_log_agf(
offsetof(xfs_agf_t, agf_versionnum),
offsetof(xfs_agf_t, agf_seqno),
offsetof(xfs_agf_t, agf_length),
offsetof(xfs_agf_t, agf_roots[0]),
offsetof(xfs_agf_t, agf_levels[0]),
offsetof(xfs_agf_t, agf_bno_root), /* also cnt/rmap root */
offsetof(xfs_agf_t, agf_bno_level), /* also cnt/rmap levels */
offsetof(xfs_agf_t, agf_flfirst),
offsetof(xfs_agf_t, agf_fllast),
offsetof(xfs_agf_t, agf_flcount),
......@@ -3236,12 +3231,10 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_freeblks) > agf_length)
return __this_address;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
mp->m_alloc_maxlevels ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
mp->m_alloc_maxlevels)
if (be32_to_cpu(agf->agf_bno_level) < 1 ||
be32_to_cpu(agf->agf_cnt_level) < 1 ||
be32_to_cpu(agf->agf_bno_level) > mp->m_alloc_maxlevels ||
be32_to_cpu(agf->agf_cnt_level) > mp->m_alloc_maxlevels)
return __this_address;
if (xfs_has_lazysbcount(mp) &&
......@@ -3252,9 +3245,8 @@ xfs_agf_verify(
if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
return __this_address;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
mp->m_rmap_maxlevels)
if (be32_to_cpu(agf->agf_rmap_level) < 1 ||
be32_to_cpu(agf->agf_rmap_level) > mp->m_rmap_maxlevels)
return __this_address;
}
......@@ -3380,12 +3372,9 @@ xfs_alloc_read_agf(
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
pag->pagf_longest = be32_to_cpu(agf->agf_longest);
pag->pagf_levels[XFS_BTNUM_BNOi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
pag->pagf_levels[XFS_BTNUM_RMAPi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level);
pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
if (xfs_agfl_needs_reset(pag->pag_mount, agf))
set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
......@@ -3414,10 +3403,8 @@ xfs_alloc_read_agf(
ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
ASSERT(pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level));
ASSERT(pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level));
}
#endif
if (agfbpp)
......@@ -4039,7 +4026,7 @@ xfs_alloc_query_range(
union xfs_btree_irec high_brec = { .a = *high_rec };
struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };
ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
ASSERT(xfs_btree_is_bno(cur->bc_ops));
return xfs_btree_query_range(cur, &low_brec, &high_brec,
xfs_alloc_query_range_helper, &query);
}
......@@ -4053,7 +4040,7 @@ xfs_alloc_query_all(
{
struct xfs_alloc_query_range_info query;
ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
ASSERT(xfs_btree_is_bno(cur->bc_ops));
query.priv = priv;
query.fn = fn;
return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
......
......@@ -16,6 +16,7 @@
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"
......@@ -23,13 +24,22 @@
static struct kmem_cache *xfs_allocbt_cur_cache;
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
xfs_bnobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
return xfs_bnobt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
cur->bc_ag.pag);
}
STATIC struct xfs_btree_cur *
xfs_cntbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_cntbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
cur->bc_ag.pag);
}
STATIC void
xfs_allocbt_set_root(
struct xfs_btree_cur *cur,
......@@ -38,13 +48,18 @@ xfs_allocbt_set_root(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
cur->bc_ag.pag->pagf_levels[btnum] += inc;
if (xfs_btree_is_bno(cur->bc_ops)) {
agf->agf_bno_root = ptr->s;
be32_add_cpu(&agf->agf_bno_level, inc);
cur->bc_ag.pag->pagf_bno_level += inc;
} else {
agf->agf_cnt_root = ptr->s;
be32_add_cpu(&agf->agf_cnt_level, inc);
cur->bc_ag.pag->pagf_cnt_level += inc;
}
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
......@@ -116,7 +131,7 @@ xfs_allocbt_update_lastrec(
__be32 len;
int numrecs;
ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);
ASSERT(!xfs_btree_is_bno(cur->bc_ops));
switch (reason) {
case LASTREC_UPDATE:
......@@ -226,7 +241,10 @@ xfs_allocbt_init_ptr_from_cur(
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
if (xfs_btree_is_bno(cur->bc_ops))
ptr->s = agf->agf_bno_root;
else
ptr->s = agf->agf_cnt_root;
}
STATIC int64_t
......@@ -299,7 +317,6 @@ xfs_allocbt_verify(
struct xfs_perag *pag = bp->b_pag;
xfs_failaddr_t fa;
unsigned int level;
xfs_btnum_t btnum = XFS_BTNUM_BNOi;
if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
......@@ -320,21 +337,27 @@ xfs_allocbt_verify(
* against.
*/
level = be16_to_cpu(block->bb_level);
if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
btnum = XFS_BTNUM_CNTi;
if (pag && xfs_perag_initialised_agf(pag)) {
unsigned int maxlevel = pag->pagf_levels[btnum];
unsigned int maxlevel, repair_maxlevel = 0;
#ifdef CONFIG_XFS_ONLINE_REPAIR
/*
* Online repair could be rewriting the free space btrees, so
* we'll validate against the larger of either tree while this
* is going on.
*/
maxlevel = max_t(unsigned int, maxlevel,
pag->pagf_repair_levels[btnum]);
if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC)) {
maxlevel = pag->pagf_cnt_level;
#ifdef CONFIG_XFS_ONLINE_REPAIR
repair_maxlevel = pag->pagf_repair_cnt_level;
#endif
if (level >= maxlevel)
} else {
maxlevel = pag->pagf_bno_level;
#ifdef CONFIG_XFS_ONLINE_REPAIR
repair_maxlevel = pag->pagf_repair_bno_level;
#endif
}
if (level >= max(maxlevel, repair_maxlevel))
return __this_address;
} else if (level >= mp->m_alloc_maxlevels)
return __this_address;
......@@ -455,6 +478,7 @@ xfs_allocbt_keys_contiguous(
}
const struct xfs_btree_ops xfs_bnobt_ops = {
.name = "bno",
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_alloc_rec_t),
......@@ -463,8 +487,9 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
.lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtb_2),
.sick_mask = XFS_SICK_AG_BNOBT,
.dup_cursor = xfs_allocbt_dup_cursor,
.dup_cursor = xfs_bnobt_dup_cursor,
.set_root = xfs_allocbt_set_root,
.alloc_block = xfs_allocbt_alloc_block,
.free_block = xfs_allocbt_free_block,
......@@ -484,6 +509,7 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
};
const struct xfs_btree_ops xfs_cntbt_ops = {
.name = "cnt",
.type = XFS_BTREE_TYPE_AG,
.geom_flags = XFS_BTGEO_LASTREC_UPDATE,
......@@ -493,8 +519,9 @@ const struct xfs_btree_ops xfs_cntbt_ops = {
.lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtc_2),
.sick_mask = XFS_SICK_AG_CNTBT,
.dup_cursor = xfs_allocbt_dup_cursor,
.dup_cursor = xfs_cntbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
.alloc_block = xfs_allocbt_alloc_block,
.free_block = xfs_allocbt_free_block,
......@@ -513,65 +540,55 @@ const struct xfs_btree_ops xfs_cntbt_ops = {
.keys_contiguous = NULL, /* not needed right now */
};
/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
/*
* Allocate a new bnobt cursor.
*
* For staging cursors tp and agbp are NULL.
*/
struct xfs_btree_cur *
xfs_bnobt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_perag *pag,
xfs_btnum_t btnum)
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
const struct xfs_btree_ops *ops = &xfs_bnobt_ops;
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
if (btnum == XFS_BTNUM_CNT)
ops = &xfs_cntbt_ops;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops, mp->m_alloc_maxlevels,
xfs_allocbt_cur_cache);
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bnobt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
/*
* Allocate a new allocation btree cursor.
*/
struct xfs_btree_cur * /* new alloc btree cursor */
xfs_allocbt_init_cursor(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
struct xfs_buf *agbp, /* buffer for agf structure */
struct xfs_perag *pag,
xfs_btnum_t btnum) /* btree identifier */
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
if (btnum == XFS_BTNUM_CNT)
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
else
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;
cur->bc_nlevels = be32_to_cpu(agf->agf_bno_level);
}
return cur;
}
/* Create a free space btree cursor with a fake root for staging. */
/*
* Allocate a new cntbt cursor.
*
* For staging cursors tp and agbp are NULL.
*/
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
xfs_cntbt_init_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
struct xfs_perag *pag,
xfs_btnum_t btnum)
struct xfs_trans *tp,
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
xfs_btree_stage_afakeroot(cur, afake);
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_cntbt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;
cur->bc_nlevels = be32_to_cpu(agf->agf_cnt_level);
}
return cur;
}
......@@ -590,15 +607,16 @@ xfs_allocbt_commit_staged_btree(
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
if (cur->bc_btnum == XFS_BTNUM_BNO) {
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
if (xfs_btree_is_bno(cur->bc_ops)) {
agf->agf_bno_root = cpu_to_be32(afake->af_root);
agf->agf_bno_level = cpu_to_be32(afake->af_levels);
} else {
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
agf->agf_cnt_root = cpu_to_be32(afake->af_root);
agf->agf_cnt_level = cpu_to_be32(afake->af_levels);
}
xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
xfs_btree_commit_afakeroot(cur, tp, agbp);
}
/* Calculate number of records in an alloc btree block. */
......
......@@ -47,12 +47,12 @@ struct xbtree_afakeroot;
(maxrecs) * sizeof(xfs_alloc_key_t) + \
((index) - 1) * sizeof(xfs_alloc_ptr_t)))
extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *mp,
struct xfs_btree_cur *xfs_bnobt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_perag *pag, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_allocbt_stage_cursor(struct xfs_mount *mp,
struct xbtree_afakeroot *afake, struct xfs_perag *pag,
xfs_btnum_t btnum);
struct xfs_perag *pag);
struct xfs_btree_cur *xfs_cntbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_perag *pag);
extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
......
......@@ -369,14 +369,6 @@ xfs_bmbt_init_rec_from_cur(
xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
STATIC void
xfs_bmbt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
ptr->l = 0;
}
STATIC int64_t
xfs_bmbt_key_diff(
struct xfs_btree_cur *cur,
......@@ -525,6 +517,7 @@ xfs_bmbt_keys_contiguous(
}
const struct xfs_btree_ops xfs_bmbt_ops = {
.name = "bmap",
.type = XFS_BTREE_TYPE_INODE,
.rec_len = sizeof(xfs_bmbt_rec_t),
......@@ -544,7 +537,6 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.init_key_from_rec = xfs_bmbt_init_key_from_rec,
.init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
.key_diff = xfs_bmbt_key_diff,
.diff_two_keys = xfs_bmbt_diff_two_keys,
.buf_ops = &xfs_bmbt_buf_ops,
......@@ -553,27 +545,10 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.keys_contiguous = xfs_bmbt_keys_contiguous,
};
static struct xfs_btree_cur *
xfs_bmbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork)
{
struct xfs_btree_cur *cur;
ASSERT(whichfork != XFS_COW_FORK);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_ino.ip = ip;
cur->bc_bmap.allocated = 0;
return cur;
}
/*
* Allocate a new bmap btree cursor.
* Create a new bmap btree cursor.
*
 * For staging cursors, -1 is passed in whichfork.
*/
struct xfs_btree_cur *
xfs_bmbt_init_cursor(
......@@ -582,15 +557,34 @@ xfs_bmbt_init_cursor(
struct xfs_inode *ip,
int whichfork)
{
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur;
unsigned int maxlevels;
cur = xfs_bmbt_init_common(mp, tp, ip, whichfork);
ASSERT(whichfork != XFS_COW_FORK);
cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
/*
 * The data fork always has the larger maxlevel, so use that for staging
 * cursors.
*/
switch (whichfork) {
case XFS_STAGING_FORK:
maxlevels = mp->m_bm_maxlevels[XFS_DATA_FORK];
break;
default:
maxlevels = mp->m_bm_maxlevels[whichfork];
break;
}
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bmbt_ops, maxlevels,
xfs_bmbt_cur_cache);
cur->bc_ino.ip = ip;
cur->bc_ino.whichfork = whichfork;
cur->bc_bmap.allocated = 0;
if (whichfork != XFS_STAGING_FORK) {
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
}
return cur;
}
......@@ -605,33 +599,6 @@ xfs_bmbt_block_maxrecs(
return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}
/*
* Allocate a new bmap btree cursor for reloading an inode block mapping data
* structure. Note that callers can use the staged cursor to reload extents
* format inode forks if they rebuild the iext tree and commit the staged
* cursor immediately.
*/
struct xfs_btree_cur *
xfs_bmbt_stage_cursor(
struct xfs_mount *mp,
struct xfs_inode *ip,
struct xbtree_ifakeroot *ifake)
{
struct xfs_btree_cur *cur;
struct xfs_btree_ops *ops;
/* data fork always has larger maxheight */
cur = xfs_bmbt_init_common(mp, NULL, ip, XFS_DATA_FORK);
cur->bc_nlevels = ifake->if_levels;
cur->bc_ino.forksize = ifake->if_fork_size;
/* Don't let anyone think we're attached to the real fork yet. */
cur->bc_ino.whichfork = -1;
xfs_btree_stage_ifakeroot(cur, ifake, &ops);
ops->update_cursor = NULL;
return cur;
}
/*
* Swap in the new inode fork root. Once we pass this point the newly rebuilt
* mappings are in place and we have to kill off any old btree blocks.
......@@ -672,7 +639,7 @@ xfs_bmbt_commit_staged_btree(
break;
}
xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
xfs_btree_commit_ifakeroot(cur, tp, whichfork, &xfs_bmbt_ops);
xfs_btree_commit_ifakeroot(cur, tp, whichfork);
}
/*
......
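
With xfs_bmbt_stage_cursor removed above, staging callers are expected to go through the regular constructor. A rough usage sketch, inferred from the hunks above rather than copied from the patch:

/* Sketch of staging a bmbt cursor after this series (inferred usage). */
struct xfs_btree_cur	*cur;

/* tp is NULL; XFS_STAGING_FORK (-1) selects the larger data-fork maxlevels */
cur = xfs_bmbt_init_cursor(mp, NULL, ip, XFS_STAGING_FORK);
xfs_btree_stage_ifakeroot(cur, ifake);
/* ... bulk load the rebuilt mapping records ... */
xfs_bmbt_commit_staged_btree(cur, tp, whichfork);

The old helper's update_cursor override is no longer needed either: xfs_btree_insert now skips ->update_cursor for cursors with XFS_BTREE_STAGING set, as the xfs_btree.c hunk below shows.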
......@@ -107,8 +107,6 @@ extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_inode *, int);
struct xfs_btree_cur *xfs_bmbt_stage_cursor(struct xfs_mount *mp,
struct xfs_inode *ip, struct xbtree_ifakeroot *ifake);
void xfs_bmbt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, int whichfork);
......
......@@ -298,17 +298,17 @@ xfs_btree_check_ptr(
level))
return 0;
xfs_err(cur->bc_mp,
"Inode %llu fork %d: Corrupt btree %d pointer at level %d index %d.",
"Inode %llu fork %d: Corrupt %sbt pointer at level %d index %d.",
cur->bc_ino.ip->i_ino,
cur->bc_ino.whichfork, cur->bc_btnum,
cur->bc_ino.whichfork, cur->bc_ops->name,
level, index);
} else {
if (xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]),
level))
return 0;
xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
cur->bc_ag.pag->pag_agno, cur->bc_btnum,
"AG %u: Corrupt %sbt pointer at level %d index %d.",
cur->bc_ag.pag->pag_agno, cur->bc_ops->name,
level, index);
}
......@@ -407,6 +407,15 @@ xfs_btree_free_block(
trace_xfs_btree_free_block(cur, bp);
/*
* Don't allow block freeing for a staging cursor, because staging
* cursors do not support regular btree modifications.
*/
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
ASSERT(0);
return -EFSCORRUPTED;
}
error = cur->bc_ops->free_block(cur, bp);
if (!error) {
xfs_trans_binval(cur->bc_tp, bp);
......@@ -445,7 +454,7 @@ xfs_btree_del_cursor(
* zero, then we should be shut down or on our way to shutdown due to
* cancelling a dirty transaction on error.
*/
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_bmap.allocated == 0 ||
ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 ||
xfs_is_shutdown(cur->bc_mp) || error != 0);
switch (cur->bc_ops->type) {
......@@ -458,8 +467,6 @@ xfs_btree_del_cursor(
break;
}
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kfree(cur->bc_ops);
kmem_cache_free(cur->bc_cache, cur);
}
......@@ -467,20 +474,26 @@ xfs_btree_del_cursor(
* Duplicate the btree cursor.
* Allocate a new one, copy the record, re-get the buffers.
*/
int /* error */
int /* error */
xfs_btree_dup_cursor(
struct xfs_btree_cur *cur, /* input cursor */
struct xfs_btree_cur **ncur) /* output cursor */
struct xfs_btree_cur *cur, /* input cursor */
struct xfs_btree_cur **ncur) /* output cursor */
{
struct xfs_buf *bp; /* btree block's buffer pointer */
int error; /* error return value */
int i; /* level number of btree block */
xfs_mount_t *mp; /* mount structure for filesystem */
struct xfs_btree_cur *new; /* new cursor value */
xfs_trans_t *tp; /* transaction pointer, can be NULL */
struct xfs_mount *mp = cur->bc_mp;
struct xfs_trans *tp = cur->bc_tp;
struct xfs_buf *bp;
struct xfs_btree_cur *new;
int error;
int i;
tp = cur->bc_tp;
mp = cur->bc_mp;
/*
* Don't allow staging cursors to be duplicated because they're supposed
* to be kept private to a single thread.
*/
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
ASSERT(0);
return -EFSCORRUPTED;
}
/*
* Allocate a new cursor like the old one.
......@@ -1881,6 +1894,27 @@ xfs_lookup_get_search_key(
return xfs_btree_key_addr(cur, keyno, block);
}
/*
* Initialize a pointer to the root block.
*/
void
xfs_btree_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
/*
* Inode-rooted btrees call xfs_btree_get_iroot to find the root
* in xfs_btree_lookup_get_block and don't need a pointer here.
*/
ptr->l = 0;
} else if (cur->bc_flags & XFS_BTREE_STAGING) {
ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root);
} else {
cur->bc_ops->init_ptr_from_cur(cur, ptr);
}
}
/*
* Lookup the record. The cursor is made to point to it, based on dir.
* stat is set to 0 if can't find any such record, 1 for success.
......@@ -1911,7 +1945,7 @@ xfs_btree_lookup(
keyno = 0;
/* initialise start pointer from cursor */
cur->bc_ops->init_ptr_from_cur(cur, &ptr);
xfs_btree_init_ptr_from_cur(cur, &ptr);
pp = &ptr;
/*
......@@ -2697,6 +2731,18 @@ xfs_btree_alloc_block(
{
int error;
/*
* Don't allow block allocation for a staging cursor, because staging
* cursors do not support regular btree modifications.
*
* Bulk loading uses a separate callback to obtain new blocks from a
* preallocated list, which prevents ENOSPC failures during loading.
*/
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
ASSERT(0);
return -EFSCORRUPTED;
}
error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat);
trace_xfs_btree_alloc_block(cur, new_block, *stat, error);
return error;
......@@ -2970,7 +3016,7 @@ xfs_btree_split(
struct xfs_btree_split_args args;
DECLARE_COMPLETION_ONSTACK(done);
if (cur->bc_btnum != XFS_BTNUM_BMAP ||
if (!xfs_btree_is_bmap(cur->bc_ops) ||
cur->bc_tp->t_highest_agno == NULLAGNUMBER)
return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
......@@ -3097,6 +3143,21 @@ xfs_btree_new_iroot(
return error;
}
static void
xfs_btree_set_root(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr,
int inc)
{
if (cur->bc_flags & XFS_BTREE_STAGING) {
/* Update the btree root information for a per-AG fake root. */
cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s);
cur->bc_ag.afake->af_levels += inc;
} else {
cur->bc_ops->set_root(cur, ptr, inc);
}
}
/*
* Allocate a new root block, fill it in.
*/
......@@ -3121,7 +3182,7 @@ xfs_btree_new_root(
XFS_BTREE_STATS_INC(cur, newroot);
/* initialise our start point from the cursor */
cur->bc_ops->init_ptr_from_cur(cur, &rptr);
xfs_btree_init_ptr_from_cur(cur, &rptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat);
......@@ -3137,7 +3198,7 @@ xfs_btree_new_root(
goto error0;
/* Set the root in the holding structure increasing the level by 1. */
cur->bc_ops->set_root(cur, &lptr, 1);
xfs_btree_set_root(cur, &lptr, 1);
/*
* At the previous root level there are now two blocks: the old root,
......@@ -3565,7 +3626,8 @@ xfs_btree_insert(
if (pcur != cur &&
(ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
/* Save the state from the cursor before we trash it */
if (cur->bc_ops->update_cursor)
if (cur->bc_ops->update_cursor &&
!(cur->bc_flags & XFS_BTREE_STAGING))
cur->bc_ops->update_cursor(pcur, cur);
cur->bc_nlevels = pcur->bc_nlevels;
xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
......@@ -3708,7 +3770,7 @@ xfs_btree_kill_root(
* Update the root pointer, decreasing the level by 1 and then
* free the old root.
*/
cur->bc_ops->set_root(cur, newroot, -1);
xfs_btree_set_root(cur, newroot, -1);
error = xfs_btree_free_block(cur, bp);
if (error)
......@@ -4430,7 +4492,7 @@ xfs_btree_visit_blocks(
struct xfs_btree_block *block = NULL;
int error = 0;
cur->bc_ops->init_ptr_from_cur(cur, &lptr);
xfs_btree_init_ptr_from_cur(cur, &lptr);
/* for each level */
for (level = cur->bc_nlevels - 1; level >= 0; level--) {
......@@ -4852,7 +4914,7 @@ xfs_btree_overlapped_query_range(
/* Load the root of the btree. */
level = cur->bc_nlevels - 1;
cur->bc_ops->init_ptr_from_cur(cur, &ptr);
xfs_btree_init_ptr_from_cur(cur, &ptr);
error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
if (error)
return error;
......
......@@ -55,14 +55,6 @@ union xfs_btree_rec {
#define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi)
#define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi)
#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi)
#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi)
#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi)
#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
#define XFS_BTNUM_REFC ((xfs_btnum_t)XFS_BTNUM_REFCi)
struct xfs_btree_ops;
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);
......@@ -123,6 +115,8 @@ enum xfs_btree_type {
};
struct xfs_btree_ops {
const char *name;
/* Type of btree - AG-rooted or inode-rooted */
enum xfs_btree_type type;
......@@ -140,6 +134,9 @@ struct xfs_btree_ops {
/* offset of btree stats array */
unsigned int statoff;
/* sick mask for health reporting (only for XFS_BTREE_TYPE_AG) */
unsigned int sick_mask;
/* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
void (*update_cursor)(struct xfs_btree_cur *src,
......@@ -267,7 +264,6 @@ struct xfs_btree_cur
const struct xfs_btree_ops *bc_ops;
struct kmem_cache *bc_cache; /* cursor cache */
unsigned int bc_flags; /* btree features - below */
xfs_btnum_t bc_btnum; /* identifies which btree type */
union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */
......@@ -714,12 +710,13 @@ void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
union xfs_btree_key *dst_key,
const union xfs_btree_key *src_key, int numkeys);
void xfs_btree_init_ptr_from_cur(struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr);
static inline struct xfs_btree_cur *
xfs_btree_alloc_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_btnum_t btnum,
const struct xfs_btree_ops *ops,
uint8_t maxlevels,
struct kmem_cache *cache)
......@@ -735,7 +732,6 @@ xfs_btree_alloc_cursor(
cur->bc_ops = ops;
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_btnum = btnum;
cur->bc_maxlevels = maxlevels;
cur->bc_cache = cache;
......
......@@ -38,63 +38,6 @@
* specific btree type to commit the new btree into the filesystem.
*/
/*
* Don't allow staging cursors to be duplicated because they're supposed to be
* kept private to a single thread.
*/
STATIC struct xfs_btree_cur *
xfs_btree_fakeroot_dup_cursor(
struct xfs_btree_cur *cur)
{
ASSERT(0);
return NULL;
}
/*
* Don't allow block allocation for a staging cursor, because staging cursors
* do not support regular btree modifications.
*
* Bulk loading uses a separate callback to obtain new blocks from a
* preallocated list, which prevents ENOSPC failures during loading.
*/
STATIC int
xfs_btree_fakeroot_alloc_block(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *start_bno,
union xfs_btree_ptr *new_bno,
int *stat)
{
ASSERT(0);
return -EFSCORRUPTED;
}
/*
* Don't allow block freeing for a staging cursor, because staging cursors
* do not support regular btree modifications.
*/
STATIC int
xfs_btree_fakeroot_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
ASSERT(0);
return -EFSCORRUPTED;
}
/* Initialize a pointer to the root block from the fakeroot. */
STATIC void
xfs_btree_fakeroot_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
struct xbtree_afakeroot *afake;
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
afake = cur->bc_ag.afake;
ptr->s = cpu_to_be32(afake->af_root);
}
/*
* Bulk Loading for AG Btrees
* ==========================
......@@ -109,47 +52,20 @@ xfs_btree_fakeroot_init_ptr_from_cur(
* cursor into a regular btree cursor.
*/
/* Update the btree root information for a per-AG fake root. */
STATIC void
xfs_btree_afakeroot_set_root(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr,
int inc)
{
struct xbtree_afakeroot *afake = cur->bc_ag.afake;
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
afake->af_root = be32_to_cpu(ptr->s);
afake->af_levels += inc;
}
/*
 * Initialize an AG-rooted btree cursor with the given AG btree fake root.
* The btree cursor's bc_ops will be overridden as needed to make the staging
* functionality work.
*/
void
xfs_btree_stage_afakeroot(
struct xfs_btree_cur *cur,
struct xbtree_afakeroot *afake)
{
struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(cur->bc_ops->type != XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
nops->alloc_block = xfs_btree_fakeroot_alloc_block;
nops->free_block = xfs_btree_fakeroot_free_block;
nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
nops->set_root = xfs_btree_afakeroot_set_root;
nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;
cur->bc_ag.afake = afake;
cur->bc_nlevels = afake->af_levels;
cur->bc_ops = nops;
cur->bc_flags |= XFS_BTREE_STAGING;
}
......@@ -163,18 +79,15 @@ void
xfs_btree_commit_afakeroot(
struct xfs_btree_cur *cur,
struct xfs_trans *tp,
struct xfs_buf *agbp,
const struct xfs_btree_ops *ops)
struct xfs_buf *agbp)
{
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
ASSERT(cur->bc_tp == NULL);
trace_xfs_btree_commit_afakeroot(cur);
kfree((void *)cur->bc_ops);
cur->bc_ag.afake = NULL;
cur->bc_ag.agbp = agbp;
cur->bc_ops = ops;
cur->bc_flags &= ~XFS_BTREE_STAGING;
cur->bc_tp = tp;
}
......@@ -212,29 +125,16 @@ xfs_btree_commit_afakeroot(
void
xfs_btree_stage_ifakeroot(
struct xfs_btree_cur *cur,
struct xbtree_ifakeroot *ifake,
struct xfs_btree_ops **new_ops)
struct xbtree_ifakeroot *ifake)
{
struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
nops->alloc_block = xfs_btree_fakeroot_alloc_block;
nops->free_block = xfs_btree_fakeroot_free_block;
nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;
cur->bc_ino.ifake = ifake;
cur->bc_nlevels = ifake->if_levels;
cur->bc_ops = nops;
cur->bc_ino.forksize = ifake->if_fork_size;
cur->bc_flags |= XFS_BTREE_STAGING;
if (new_ops)
*new_ops = nops;
}
/*
......@@ -247,18 +147,15 @@ void
xfs_btree_commit_ifakeroot(
struct xfs_btree_cur *cur,
struct xfs_trans *tp,
int whichfork,
const struct xfs_btree_ops *ops)
int whichfork)
{
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
ASSERT(cur->bc_tp == NULL);
trace_xfs_btree_commit_ifakeroot(cur);
kfree((void *)cur->bc_ops);
cur->bc_ino.ifake = NULL;
cur->bc_ino.whichfork = whichfork;
cur->bc_ops = ops;
cur->bc_flags &= ~XFS_BTREE_STAGING;
cur->bc_tp = tp;
}
......
......@@ -22,7 +22,7 @@ struct xbtree_afakeroot {
void xfs_btree_stage_afakeroot(struct xfs_btree_cur *cur,
struct xbtree_afakeroot *afake);
void xfs_btree_commit_afakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
struct xfs_buf *agbp, const struct xfs_btree_ops *ops);
struct xfs_buf *agbp);
/* Fake root for an inode-rooted btree. */
struct xbtree_ifakeroot {
......@@ -41,10 +41,9 @@ struct xbtree_ifakeroot {
/* Cursor interactions with fake roots for inode-rooted btrees. */
void xfs_btree_stage_ifakeroot(struct xfs_btree_cur *cur,
struct xbtree_ifakeroot *ifake,
struct xfs_btree_ops **new_ops);
struct xbtree_ifakeroot *ifake);
void xfs_btree_commit_ifakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
int whichfork, const struct xfs_btree_ops *ops);
int whichfork);
/* Bulk loading of staged btrees. */
typedef int (*xfs_btree_bload_get_records_fn)(struct xfs_btree_cur *cur,
......
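
Taken together, the staging rework means a fake-rooted AG btree cursor keeps its regular, const bc_ops for its whole lifetime; staging behavior is dispatched on the XFS_BTREE_STAGING flag instead of through a kmalloc'ed copy of the ops with swapped-in callbacks. A rough sequence using the rmapbt as the example (assumed usage, pieced together from the hunks above and below):

/* Sketch: staging an AG btree with the reworked interfaces. */
struct xbtree_afakeroot	afake = { .af_levels = 1 };
struct xfs_btree_cur	*cur;

cur = xfs_rmapbt_init_cursor(mp, NULL, NULL, pag);	/* tp == agbp == NULL */
xfs_btree_stage_afakeroot(cur, &afake);
/* ... xfs_btree_bload() the new tree from preallocated blocks ... */
xfs_rmapbt_commit_staged_btree(cur, tp, agbp);	/* -> xfs_btree_commit_afakeroot() */

Because bc_ops is never overridden, xfs_btree_del_cursor no longer has to free a staging copy of the ops, which is exactly the kfree(cur->bc_ops) deletion in the xfs_btree.c hunk above.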
......@@ -477,15 +477,9 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
/*
* Btree number 0 is bno, 1 is cnt, 2 is rmap. This value gives the size of the
* arrays below.
*/
#define XFS_BTNUM_AGF ((int)XFS_BTNUM_RMAPi + 1)
/*
* The second word of agf_levels in the first a.g. overlaps the EFS
* superblock's magic number. Since the magic numbers valid for EFS
* are > 64k, our value cannot be confused for an EFS superblock's.
* agf_cnt_level in the first AGF overlaps the EFS superblock's magic number.
* Since the magic numbers valid for EFS are > 64k, our value cannot be confused
* for an EFS superblock.
*/
typedef struct xfs_agf {
......@@ -499,8 +493,13 @@ typedef struct xfs_agf {
/*
* Freespace and rmap information
*/
__be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
__be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
__be32 agf_bno_root; /* bnobt root block */
__be32 agf_cnt_root; /* cntbt root block */
__be32 agf_rmap_root; /* rmapbt root block */
__be32 agf_bno_level; /* bnobt btree levels */
__be32 agf_cnt_level; /* cntbt btree levels */
__be32 agf_rmap_level; /* rmapbt btree levels */
__be32 agf_flfirst; /* first freelist block's index */
__be32 agf_fllast; /* last freelist block's index */
......
......@@ -17,6 +17,7 @@
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
......@@ -37,7 +38,15 @@ xfs_inobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
cur->bc_ag.agbp, cur->bc_btnum);
cur->bc_ag.agbp);
}
STATIC struct xfs_btree_cur *
xfs_finobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_finobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
cur->bc_ag.agbp);
}
STATIC void
......@@ -81,9 +90,9 @@ xfs_inobt_mod_blockcount(
if (!xfs_has_inobtcounts(cur->bc_mp))
return;
if (cur->bc_btnum == XFS_BTNUM_FINO)
if (xfs_btree_is_fino(cur->bc_ops))
be32_add_cpu(&agi->agi_fblocks, howmuch);
else if (cur->bc_btnum == XFS_BTNUM_INO)
else
be32_add_cpu(&agi->agi_iblocks, howmuch);
xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}
......@@ -399,6 +408,7 @@ xfs_inobt_keys_contiguous(
}
const struct xfs_btree_ops xfs_inobt_ops = {
.name = "ino",
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_inobt_rec_t),
......@@ -407,6 +417,7 @@ const struct xfs_btree_ops xfs_inobt_ops = {
.lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
.sick_mask = XFS_SICK_AG_INOBT,
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root,
......@@ -427,6 +438,7 @@ const struct xfs_btree_ops xfs_inobt_ops = {
};
const struct xfs_btree_ops xfs_finobt_ops = {
.name = "fino",
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_inobt_rec_t),
......@@ -435,8 +447,9 @@ const struct xfs_btree_ops xfs_finobt_ops = {
.lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
.sick_mask = XFS_SICK_AG_FINOBT,
.dup_cursor = xfs_inobt_dup_cursor,
.dup_cursor = xfs_finobt_dup_cursor,
.set_root = xfs_finobt_set_root,
.alloc_block = xfs_finobt_alloc_block,
.free_block = xfs_finobt_free_block,
......@@ -455,60 +468,54 @@ const struct xfs_btree_ops xfs_finobt_ops = {
};
/*
* Initialize a new inode btree cursor.
* Create an inode btree cursor.
*
* For staging cursors tp and agbp are NULL.
*/
static struct xfs_btree_cur *
xfs_inobt_init_common(
struct xfs_btree_cur *
xfs_inobt_init_cursor(
struct xfs_perag *pag,
struct xfs_trans *tp, /* transaction pointer */
xfs_btnum_t btnum) /* ialloc or free ino btree */
struct xfs_trans *tp,
struct xfs_buf *agbp)
{
struct xfs_mount *mp = pag->pag_mount;
const struct xfs_btree_ops *ops = &xfs_inobt_ops;
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
if (btnum == XFS_BTNUM_FINO)
ops = &xfs_finobt_ops;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops,
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.agbp = agbp;
if (agbp) {
struct xfs_agi *agi = agbp->b_addr;
cur->bc_nlevels = be32_to_cpu(agi->agi_level);
}
return cur;
}
/* Create an inode btree cursor. */
/*
* Create a free inode btree cursor.
*
* For staging cursors tp and agbp are NULL.
*/
struct xfs_btree_cur *
xfs_inobt_init_cursor(
xfs_finobt_init_cursor(
struct xfs_perag *pag,
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_btnum_t btnum)
struct xfs_buf *agbp)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_btree_cur *cur;
struct xfs_agi *agi = agbp->b_addr;
cur = xfs_inobt_init_common(pag, tp, btnum);
if (btnum == XFS_BTNUM_INO)
cur->bc_nlevels = be32_to_cpu(agi->agi_level);
else
cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.agbp = agbp;
return cur;
}
/* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_inobt_stage_cursor(
struct xfs_perag *pag,
struct xbtree_afakeroot *afake,
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
if (agbp) {
struct xfs_agi *agi = agbp->b_addr;
cur = xfs_inobt_init_common(pag, NULL, btnum);
xfs_btree_stage_afakeroot(cur, afake);
cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
}
return cur;
}
......@@ -528,7 +535,7 @@ xfs_inobt_commit_staged_btree(
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
if (cur->bc_btnum == XFS_BTNUM_INO) {
if (xfs_btree_is_ino(cur->bc_ops)) {
fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
agi->agi_root = cpu_to_be32(afake->af_root);
agi->agi_level = cpu_to_be32(afake->af_levels);
......@@ -537,7 +544,7 @@ xfs_inobt_commit_staged_btree(
fields |= XFS_AGI_IBLOCKS;
}
xfs_ialloc_log_agi(tp, agbp, fields);
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
xfs_btree_commit_afakeroot(cur, tp, agbp);
} else {
fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
agi->agi_free_root = cpu_to_be32(afake->af_root);
......@@ -547,7 +554,7 @@ xfs_inobt_commit_staged_btree(
fields |= XFS_AGI_IBLOCKS;
}
xfs_ialloc_log_agi(tp, agbp, fields);
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
xfs_btree_commit_afakeroot(cur, tp, agbp);
}
}
......@@ -728,45 +735,21 @@ xfs_inobt_max_size(
XFS_INODES_PER_CHUNK);
}
/* Read AGI and create inobt cursor. */
int
xfs_inobt_cur(
struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_btnum_t which,
struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp)
{
struct xfs_btree_cur *cur;
int error;
ASSERT(*agi_bpp == NULL);
ASSERT(*curpp == NULL);
error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
if (error)
return error;
cur = xfs_inobt_init_cursor(pag, tp, *agi_bpp, which);
*curpp = cur;
return 0;
}
static int
xfs_inobt_count_blocks(
xfs_finobt_count_blocks(
struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks)
{
struct xfs_buf *agbp = NULL;
struct xfs_btree_cur *cur = NULL;
struct xfs_btree_cur *cur;
int error;
error = xfs_inobt_cur(pag, tp, btnum, &cur, &agbp);
error = xfs_ialloc_read_agi(pag, tp, &agbp);
if (error)
return error;
cur = xfs_inobt_init_cursor(pag, tp, agbp);
error = xfs_btree_count_blocks(cur, tree_blocks);
xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
......@@ -814,8 +797,7 @@ xfs_finobt_calc_reserves(
if (xfs_has_inobtcounts(pag->pag_mount))
error = xfs_finobt_read_blocks(pag, tp, &tree_len);
else
error = xfs_inobt_count_blocks(pag, tp, XFS_BTNUM_FINO,
&tree_len);
error = xfs_finobt_count_blocks(pag, tp, &tree_len);
if (error)
return error;
......
......@@ -46,10 +46,10 @@ struct xfs_perag;
(maxrecs) * sizeof(xfs_inobt_key_t) + \
((index) - 1) * sizeof(xfs_inobt_ptr_t)))
extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_buf *agbp, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_perag *pag,
struct xbtree_afakeroot *afake, xfs_btnum_t btnum);
struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_buf *agbp);
struct xfs_btree_cur *xfs_finobt_init_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_buf *agbp);
extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
/* ir_holemask to inode allocation bitmap conversion */
......@@ -66,9 +66,6 @@ int xfs_finobt_calc_reserves(struct xfs_perag *perag, struct xfs_trans *tp,
xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
int xfs_inobt_cur(struct xfs_perag *pag, struct xfs_trans *tp,
xfs_btnum_t btnum, struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp);
void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, struct xfs_buf *agbp);
......
......@@ -16,6 +16,7 @@
#include "xfs_refcount.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
......@@ -318,6 +319,7 @@ xfs_refcountbt_keys_contiguous(
}
const struct xfs_btree_ops xfs_refcountbt_ops = {
.name = "refcount",
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(struct xfs_refcount_rec),
......@@ -326,6 +328,7 @@ const struct xfs_btree_ops xfs_refcountbt_ops = {
.lru_refs = XFS_REFC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2),
.sick_mask = XFS_SICK_AG_REFCNTBT,
.dup_cursor = xfs_refcountbt_dup_cursor,
.set_root = xfs_refcountbt_set_root,
......@@ -346,55 +349,32 @@ const struct xfs_btree_ops xfs_refcountbt_ops = {
};
/*
* Initialize a new refcount btree cursor.
* Create a new refcount btree cursor.
*
* For staging cursors tp and agbp are NULL.
*/
static struct xfs_btree_cur *
xfs_refcountbt_init_common(
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
&xfs_refcountbt_ops, mp->m_refc_maxlevels,
xfs_refcountbt_cur_cache);
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops,
mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_refc.nr_ops = 0;
cur->bc_refc.shape_changes = 0;
return cur;
}
/* Create a btree cursor. */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
cur = xfs_refcountbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
cur->bc_ag.agbp = agbp;
return cur;
}
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;
/* Create a btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
cur = xfs_refcountbt_init_common(mp, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
}
return cur;
}
......@@ -419,7 +399,7 @@ xfs_refcountbt_commit_staged_btree(
xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
XFS_AGF_REFCOUNT_ROOT |
XFS_AGF_REFCOUNT_LEVEL);
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
xfs_btree_commit_afakeroot(cur, tp, agbp);
}
/* Calculate number of records in a refcount btree block. */
......
......@@ -48,8 +48,6 @@ struct xbtree_afakeroot;
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp,
struct xfs_perag *pag);
struct xfs_btree_cur *xfs_refcountbt_stage_cursor(struct xfs_mount *mp,
struct xbtree_afakeroot *afake, struct xfs_perag *pag);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
......
......@@ -16,6 +16,7 @@
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
......@@ -65,13 +66,12 @@ xfs_rmapbt_set_root(
{
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
int btnum = cur->bc_btnum;
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
cur->bc_ag.pag->pagf_levels[btnum] += inc;
agf->agf_rmap_root = ptr->s;
be32_add_cpu(&agf->agf_rmap_level, inc);
cur->bc_ag.pag->pagf_rmap_level += inc;
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
......@@ -222,7 +222,7 @@ xfs_rmapbt_init_ptr_from_cur(
ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
ptr->s = agf->agf_rmap_root;
}
/*
......@@ -342,7 +342,7 @@ xfs_rmapbt_verify(
level = be16_to_cpu(block->bb_level);
if (pag && xfs_perag_initialised_agf(pag)) {
if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
if (level >= pag->pagf_rmap_level)
return __this_address;
} else if (level >= mp->m_rmap_maxlevels)
return __this_address;
......@@ -473,15 +473,18 @@ xfs_rmapbt_keys_contiguous(
}
const struct xfs_btree_ops xfs_rmapbt_ops = {
.name = "rmap",
.type = XFS_BTREE_TYPE_AG,
.geom_flags = XFS_BTGEO_OVERLAPPING,
.rec_len = sizeof(struct xfs_rmap_rec),
/* Overlapping btree; 2 keys per pointer. */
.key_len = 2 * sizeof(struct xfs_rmap_key),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_RMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_rmap_2),
.sick_mask = XFS_SICK_AG_RMAPBT,
.dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root,
......@@ -501,22 +504,11 @@ const struct xfs_btree_ops xfs_rmapbt_ops = {
.keys_contiguous = xfs_rmapbt_keys_contiguous,
};
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
/* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
/* Create a new reverse mapping btree cursor. */
/*
* Create a new reverse mapping btree cursor.
*
* For staging cursors tp and agbp are NULL.
*/
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
struct xfs_mount *mp,
......@@ -524,26 +516,17 @@ xfs_rmapbt_init_cursor(
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;
cur = xfs_rmapbt_init_common(mp, tp, pag);
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.agbp = agbp;
return cur;
}
if (agbp) {
struct xfs_agf *agf = agbp->b_addr;
/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;
cur = xfs_rmapbt_init_common(mp, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
cur->bc_nlevels = be32_to_cpu(agf->agf_rmap_level);
}
return cur;
}
......@@ -562,12 +545,12 @@ xfs_rmapbt_commit_staged_btree(
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
agf->agf_rmap_root = cpu_to_be32(afake->af_root);
agf->agf_rmap_level = cpu_to_be32(afake->af_levels);
agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
XFS_AGF_RMAP_BLOCKS);
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
xfs_btree_commit_afakeroot(cur, tp, agbp);
}
/* Calculate number of records in a reverse mapping btree block. */
......
......@@ -44,8 +44,6 @@ struct xbtree_afakeroot;
struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_perag *pag);
struct xfs_btree_cur *xfs_rmapbt_stage_cursor(struct xfs_mount *mp,
struct xbtree_afakeroot *afake, struct xfs_perag *pag);
void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, struct xfs_buf *agbp);
int xfs_rmapbt_maxrecs(int blocklen, int leaf);
......
......@@ -52,6 +52,41 @@ extern const struct xfs_btree_ops xfs_bmbt_ops;
extern const struct xfs_btree_ops xfs_refcountbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_ops;
static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops)
{
return ops == &xfs_bnobt_ops;
}
static inline bool xfs_btree_is_cnt(const struct xfs_btree_ops *ops)
{
return ops == &xfs_cntbt_ops;
}
static inline bool xfs_btree_is_bmap(const struct xfs_btree_ops *ops)
{
return ops == &xfs_bmbt_ops;
}
static inline bool xfs_btree_is_ino(const struct xfs_btree_ops *ops)
{
return ops == &xfs_inobt_ops;
}
static inline bool xfs_btree_is_fino(const struct xfs_btree_ops *ops)
{
return ops == &xfs_finobt_ops;
}
static inline bool xfs_btree_is_refcount(const struct xfs_btree_ops *ops)
{
return ops == &xfs_refcountbt_ops;
}
static inline bool xfs_btree_is_rmap(const struct xfs_btree_ops *ops)
{
return ops == &xfs_rmapbt_ops;
}
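These predicates replace open-coded bc_btnum comparisons throughout scrub and the btree core; for example, a check that used to read cur->bc_btnum == XFS_BTNUM_RMAP becomes:

	if (xfs_btree_is_rmap(cur->bc_ops)) {
		/* rmap-specific handling */
	}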
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
......
......@@ -80,11 +80,13 @@ typedef void * xfs_failaddr_t;
/*
* Inode fork identifiers.
*/
#define XFS_DATA_FORK 0
#define XFS_ATTR_FORK 1
#define XFS_COW_FORK 2
#define XFS_STAGING_FORK (-1) /* fake fork for staging a btree */
#define XFS_DATA_FORK (0)
#define XFS_ATTR_FORK (1)
#define XFS_COW_FORK (2)
#define XFS_WHICHFORK_STRINGS \
{ XFS_STAGING_FORK, "staging" }, \
{ XFS_DATA_FORK, "data" }, \
{ XFS_ATTR_FORK, "attr" }, \
{ XFS_COW_FORK, "cow" }
......@@ -114,24 +116,6 @@ typedef enum {
{ XFS_LOOKUP_LEi, "le" }, \
{ XFS_LOOKUP_GEi, "ge" }
/*
* This enum is used in string mapping in xfs_trace.h and scrub/trace.h;
* please keep the TRACE_DEFINE_ENUMs for it up to date.
*/
typedef enum {
XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_RMAPi, XFS_BTNUM_BMAPi,
XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_REFCi, XFS_BTNUM_MAX
} xfs_btnum_t;
#define XFS_BTNUM_STRINGS \
{ XFS_BTNUM_BNOi, "bnobt" }, \
{ XFS_BTNUM_CNTi, "cntbt" }, \
{ XFS_BTNUM_RMAPi, "rmapbt" }, \
{ XFS_BTNUM_BMAPi, "bmbt" }, \
{ XFS_BTNUM_INOi, "inobt" }, \
{ XFS_BTNUM_FINOi, "finobt" }, \
{ XFS_BTNUM_REFCi, "refcbt" }
struct xfs_name {
const unsigned char *name;
int len;
......
......@@ -556,28 +556,28 @@ xchk_agf(
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
/* Check the AGF btree roots and levels */
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
agbno = be32_to_cpu(agf->agf_bno_root);
if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
agbno = be32_to_cpu(agf->agf_cnt_root);
if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
level = be32_to_cpu(agf->agf_bno_level);
if (level <= 0 || level > mp->m_alloc_maxlevels)
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
level = be32_to_cpu(agf->agf_cnt_level);
if (level <= 0 || level > mp->m_alloc_maxlevels)
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
if (xfs_has_rmapbt(mp)) {
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
agbno = be32_to_cpu(agf->agf_rmap_root);
if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
level = be32_to_cpu(agf->agf_rmap_level);
if (level <= 0 || level > mp->m_rmap_maxlevels)
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
}
......
......@@ -174,8 +174,7 @@ xrep_agf_find_btrees(
* We relied on the rmapbt to reconstruct the AGF. If we get a
* different root then something's seriously wrong.
*/
if (fab[XREP_AGF_RMAPBT].root !=
be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
if (fab[XREP_AGF_RMAPBT].root != be32_to_cpu(old_agf->agf_rmap_root))
return -EFSCORRUPTED;
/* We must find the refcountbt root if that feature is enabled. */
......@@ -224,20 +223,14 @@ xrep_agf_set_roots(
struct xfs_agf *agf,
struct xrep_find_ag_btree *fab)
{
agf->agf_roots[XFS_BTNUM_BNOi] =
cpu_to_be32(fab[XREP_AGF_BNOBT].root);
agf->agf_levels[XFS_BTNUM_BNOi] =
cpu_to_be32(fab[XREP_AGF_BNOBT].height);
agf->agf_bno_root = cpu_to_be32(fab[XREP_AGF_BNOBT].root);
agf->agf_bno_level = cpu_to_be32(fab[XREP_AGF_BNOBT].height);
agf->agf_roots[XFS_BTNUM_CNTi] =
cpu_to_be32(fab[XREP_AGF_CNTBT].root);
agf->agf_levels[XFS_BTNUM_CNTi] =
cpu_to_be32(fab[XREP_AGF_CNTBT].height);
agf->agf_cnt_root = cpu_to_be32(fab[XREP_AGF_CNTBT].root);
agf->agf_cnt_level = cpu_to_be32(fab[XREP_AGF_CNTBT].height);
agf->agf_roots[XFS_BTNUM_RMAPi] =
cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
agf->agf_levels[XFS_BTNUM_RMAPi] =
cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
agf->agf_rmap_root = cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
agf->agf_rmap_level = cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
if (xfs_has_reflink(sc->mp)) {
agf->agf_refcount_root =
......@@ -262,8 +255,7 @@ xrep_agf_calc_from_btrees(
int error;
/* Update the AGF counters from the bnobt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag, XFS_BTNUM_BNO);
cur = xfs_bnobt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
if (error)
goto err;
......@@ -276,8 +268,7 @@ xrep_agf_calc_from_btrees(
agf->agf_longest = cpu_to_be32(raa.longest);
/* Update the AGF counters from the cntbt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag, XFS_BTNUM_CNT);
cur = xfs_cntbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
......@@ -333,12 +324,9 @@ xrep_agf_commit_new(
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_longest = be32_to_cpu(agf->agf_longest);
pag->pagf_levels[XFS_BTNUM_BNOi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
pag->pagf_levels[XFS_BTNUM_RMAPi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level);
pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
......@@ -559,16 +547,14 @@ xrep_agfl_collect_blocks(
goto out_bmp;
/* Find all blocks currently being used by the bnobt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag, XFS_BTNUM_BNO);
cur = xfs_bnobt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur);
xfs_btree_del_cursor(cur, error);
if (error)
goto out_bmp;
/* Find all blocks currently being used by the cntbt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag, XFS_BTNUM_CNT);
cur = xfs_cntbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur);
xfs_btree_del_cursor(cur, error);
if (error)
......@@ -908,7 +894,7 @@ xrep_agi_calc_from_btrees(
xfs_agino_t freecount;
int error;
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp, XFS_BTNUM_INO);
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
error = xfs_ialloc_count_inodes(cur, &count, &freecount);
if (error)
goto err;
......@@ -928,8 +914,7 @@ xrep_agi_calc_from_btrees(
if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
xfs_agblock_t blocks;
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp,
XFS_BTNUM_FINO);
cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
error = xfs_btree_count_blocks(cur, &blocks);
if (error)
goto err;
......
......@@ -687,8 +687,8 @@ xrep_abt_reset_counters(
* height values before re-initializing the perag info from the updated
* AGF to capture all the new values.
*/
pag->pagf_repair_levels[XFS_BTNUM_BNOi] = pag->pagf_levels[XFS_BTNUM_BNOi];
pag->pagf_repair_levels[XFS_BTNUM_CNTi] = pag->pagf_levels[XFS_BTNUM_CNTi];
pag->pagf_repair_bno_level = pag->pagf_bno_level;
pag->pagf_repair_cnt_level = pag->pagf_cnt_level;
/* Reinitialize with the values we just logged. */
return xrep_reinit_pagf(sc);
......@@ -735,10 +735,11 @@ xrep_abt_build_new_trees(
ra->new_cntbt.bload.claim_block = xrep_abt_claim_block;
/* Allocate cursors for the staged btrees. */
bno_cur = xfs_allocbt_stage_cursor(sc->mp, &ra->new_bnobt.afake,
pag, XFS_BTNUM_BNO);
cnt_cur = xfs_allocbt_stage_cursor(sc->mp, &ra->new_cntbt.afake,
pag, XFS_BTNUM_CNT);
bno_cur = xfs_bnobt_init_cursor(sc->mp, NULL, NULL, pag);
xfs_btree_stage_afakeroot(bno_cur, &ra->new_bnobt.afake);
cnt_cur = xfs_cntbt_init_cursor(sc->mp, NULL, NULL, pag);
xfs_btree_stage_afakeroot(cnt_cur, &ra->new_cntbt.afake);
/* Last chance to abort before we start committing fixes. */
if (xchk_should_terminate(sc, &error))
......@@ -765,10 +766,8 @@ xrep_abt_build_new_trees(
* height so that we don't trip the verifiers when writing the new
* btree blocks to disk.
*/
pag->pagf_repair_levels[XFS_BTNUM_BNOi] =
ra->new_bnobt.bload.btree_height;
pag->pagf_repair_levels[XFS_BTNUM_CNTi] =
ra->new_cntbt.bload.btree_height;
pag->pagf_repair_bno_level = ra->new_bnobt.bload.btree_height;
pag->pagf_repair_cnt_level = ra->new_cntbt.bload.btree_height;
/* Load the free space by length tree. */
ra->array_cur = XFARRAY_CURSOR_INIT;
......@@ -807,8 +806,8 @@ xrep_abt_build_new_trees(
return xrep_roll_ag_trans(sc);
err_levels:
pag->pagf_repair_levels[XFS_BTNUM_BNOi] = 0;
pag->pagf_repair_levels[XFS_BTNUM_CNTi] = 0;
pag->pagf_repair_bno_level = 0;
pag->pagf_repair_cnt_level = 0;
err_cur:
xfs_btree_del_cursor(cnt_cur, error);
xfs_btree_del_cursor(bno_cur, error);
......@@ -838,8 +837,8 @@ xrep_abt_remove_old_trees(
* Now that we've zapped all the old allocbt blocks we can turn off
* the alternate height mechanism.
*/
pag->pagf_repair_levels[XFS_BTNUM_BNOi] = 0;
pag->pagf_repair_levels[XFS_BTNUM_CNTi] = 0;
pag->pagf_repair_bno_level = 0;
pag->pagf_repair_cnt_level = 0;
return 0;
}
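Taken together, the repair paths above now share one generic staging lifecycle instead of per-btree stage-cursor helpers. A condensed sketch, assuming the bload callbacks and the nr_records count set up by the surrounding repair code, with error paths trimmed:

	bno_cur = xfs_bnobt_init_cursor(sc->mp, NULL, NULL, pag);
	xfs_btree_stage_afakeroot(bno_cur, &ra->new_bnobt.afake);

	error = xfs_btree_bload_compute_geometry(bno_cur,
			&ra->new_bnobt.bload, nr_records);
	if (error)
		goto err_cur;

	/* Let verifiers accept the possibly-taller new tree. */
	pag->pagf_repair_bno_level = ra->new_bnobt.bload.btree_height;

	error = xfs_btree_bload(bno_cur, &ra->new_bnobt.bload, ra);
	if (error)
		goto err_levels;
	/* ...then commit the fake root and reset pagf_repair_bno_level. */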
......
......@@ -639,7 +639,13 @@ xrep_bmap_build_new_fork(
rb->new_bmapbt.bload.get_records = xrep_bmap_get_records;
rb->new_bmapbt.bload.claim_block = xrep_bmap_claim_block;
rb->new_bmapbt.bload.iroot_size = xrep_bmap_iroot_size;
bmap_cur = xfs_bmbt_stage_cursor(sc->mp, sc->ip, ifake);
/*
* Allocate a new bmap btree cursor for reloading an inode block mapping
* data structure.
*/
bmap_cur = xfs_bmbt_init_cursor(sc->mp, NULL, sc->ip, XFS_STAGING_FORK);
xfs_btree_stage_ifakeroot(bmap_cur, ifake);
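/*
 * XFS_STAGING_FORK is the fake whichfork value added in xfs_types.h
 * above; XFS_WHICHFORK_STRINGS maps it to "staging", so tracepoints
 * identify this cursor correctly instead of reporting a real
 * data/attr/cow fork.
 */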
/*
* Figure out the size and format of the new fork, then fill it with
......
......@@ -374,14 +374,12 @@ xchk_btree_check_block_owner(
{
xfs_agnumber_t agno;
xfs_agblock_t agbno;
xfs_btnum_t btnum;
bool init_sa;
int error = 0;
if (!bs->cur)
return 0;
btnum = bs->cur->bc_btnum;
agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
......@@ -404,11 +402,11 @@ xchk_btree_check_block_owner(
* have to nullify it (to shut down further block owner checks) if
* self-xref encounters problems.
*/
if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
if (!bs->sc->sa.bno_cur && xfs_btree_is_bno(bs->cur->bc_ops))
bs->cur = NULL;
xchk_xref_is_only_owned_by(bs->sc, agbno, 1, bs->oinfo);
if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
if (!bs->sc->sa.rmap_cur && xfs_btree_is_rmap(bs->cur->bc_ops))
bs->cur = NULL;
out_free:
......@@ -447,7 +445,7 @@ xchk_btree_check_owner(
* duplicate cursors. Therefore, save the buffer daddr for
* later scanning.
*/
if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
if (xfs_btree_is_bno(cur->bc_ops) || xfs_btree_is_rmap(cur->bc_ops)) {
struct check_owner *co;
co = kmalloc(sizeof(struct check_owner), XCHK_GFP_FLAGS);
......@@ -480,7 +478,7 @@ xchk_btree_check_iroot_minrecs(
* existing filesystems, so instead we disable the check for data fork
* bmap btrees when there's an attr fork.
*/
if (bs->cur->bc_btnum == XFS_BTNUM_BMAP &&
if (xfs_btree_is_bmap(bs->cur->bc_ops) &&
bs->cur->bc_ino.whichfork == XFS_DATA_FORK &&
xfs_inode_has_attr_fork(bs->sc->ip))
return false;
......@@ -733,7 +731,7 @@ xchk_btree(
* error codes for us.
*/
level = cur->bc_nlevels - 1;
cur->bc_ops->init_ptr_from_cur(cur, &ptr);
xfs_btree_init_ptr_from_cur(cur, &ptr);
if (!xchk_btree_ptr_ok(bs, cur->bc_nlevels, &ptr))
goto out;
error = xchk_btree_get_block(bs, level, &ptr, &block, &bp);
......
......@@ -588,46 +588,50 @@ xchk_ag_btcur_init(
{
struct xfs_mount *mp = sc->mp;
if (sa->agf_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
/* Set up a bnobt cursor for cross-referencing. */
sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
sa->pag, XFS_BTNUM_BNO);
}
if (sa->agf_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
/* Set up a cntbt cursor for cross-referencing. */
sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
sa->pag, XFS_BTNUM_CNT);
}
/* Set up a inobt cursor for cross-referencing. */
if (sa->agi_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp, sa->agi_bp,
XFS_BTNUM_INO);
}
/* Set up a finobt cursor for cross-referencing. */
if (sa->agi_bp && xfs_has_finobt(mp) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
sa->fino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp, sa->agi_bp,
XFS_BTNUM_FINO);
}
/* Set up a rmapbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_has_rmapbt(mp) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
sa->pag);
}
/* Set up a refcountbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_has_reflink(mp) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
sa->agf_bp, sa->pag);
}
if (sa->agf_bp) {
/* Set up a bnobt cursor for cross-referencing. */
sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
sa->pag);
xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
XFS_SCRUB_TYPE_BNOBT);
/* Set up a cntbt cursor for cross-referencing. */
sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
sa->pag);
xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
XFS_SCRUB_TYPE_CNTBT);
/* Set up a rmapbt cursor for cross-referencing. */
if (xfs_has_rmapbt(mp)) {
sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
sa->agf_bp, sa->pag);
xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
XFS_SCRUB_TYPE_RMAPBT);
}
/* Set up a refcountbt cursor for cross-referencing. */
if (xfs_has_reflink(mp)) {
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
sa->agf_bp, sa->pag);
xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
XFS_SCRUB_TYPE_REFCNTBT);
}
}
if (sa->agi_bp) {
/* Set up a inobt cursor for cross-referencing. */
sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
sa->agi_bp);
xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
XFS_SCRUB_TYPE_INOBT);
/* Set up a finobt cursor for cross-referencing. */
if (xfs_has_finobt(mp)) {
sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
sa->agi_bp);
xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
XFS_SCRUB_TYPE_FINOBT);
}
}
}
......
......@@ -248,13 +248,13 @@ xchk_update_health(
}
/* Is the given per-AG btree healthy enough for scanning? */
bool
xchk_ag_btree_healthy_enough(
void
xchk_ag_btree_del_cursor_if_sick(
struct xfs_scrub *sc,
struct xfs_perag *pag,
xfs_btnum_t btnum)
struct xfs_btree_cur **curp,
unsigned int sm_type)
{
unsigned int mask = 0;
unsigned int mask = (*curp)->bc_ops->sick_mask;
/*
* We always want the cursor if it's the same type as whatever we're
......@@ -263,41 +263,8 @@ xchk_ag_btree_healthy_enough(
* Otherwise, we're only interested in the btree for cross-referencing.
* If we know the btree is bad then don't bother, just set XFAIL.
*/
switch (btnum) {
case XFS_BTNUM_BNO:
if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
return true;
mask = XFS_SICK_AG_BNOBT;
break;
case XFS_BTNUM_CNT:
if (sc->sm->sm_type == XFS_SCRUB_TYPE_CNTBT)
return true;
mask = XFS_SICK_AG_CNTBT;
break;
case XFS_BTNUM_INO:
if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
return true;
mask = XFS_SICK_AG_INOBT;
break;
case XFS_BTNUM_FINO:
if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
return true;
mask = XFS_SICK_AG_FINOBT;
break;
case XFS_BTNUM_RMAP:
if (sc->sm->sm_type == XFS_SCRUB_TYPE_RMAPBT)
return true;
mask = XFS_SICK_AG_RMAPBT;
break;
case XFS_BTNUM_REFC:
if (sc->sm->sm_type == XFS_SCRUB_TYPE_REFCNTBT)
return true;
mask = XFS_SICK_AG_REFCNTBT;
break;
default:
ASSERT(0);
return true;
}
if (sc->sm->sm_type == sm_type)
return;
/*
* If we just repaired some AG metadata, sc->sick_mask will reflect all
......@@ -309,12 +276,11 @@ xchk_ag_btree_healthy_enough(
type_to_health_flag[sc->sm->sm_type].group == XHG_AG)
mask &= ~sc->sick_mask;
if (xfs_ag_has_sickness(pag, mask)) {
if (xfs_ag_has_sickness((*curp)->bc_ag.pag, mask)) {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
return false;
xfs_btree_del_cursor(*curp, XFS_BTREE_NOERROR);
*curp = NULL;
}
return true;
}
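The caller-side pattern is now uniform: allocate the cursor unconditionally, let this helper free it if the btree is already known to be sick, and cross-reference only if the cursor survived. A minimal sketch:

	sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp, sa->pag);
	xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
			XFS_SCRUB_TYPE_BNOBT);
	if (sa->bno_cur) {
		/* safe to cross-reference against the bnobt */
	}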
/*
......
......@@ -8,8 +8,8 @@
unsigned int xchk_health_mask_for_scrub_type(__u32 scrub_type);
void xchk_update_health(struct xfs_scrub *sc);
bool xchk_ag_btree_healthy_enough(struct xfs_scrub *sc, struct xfs_perag *pag,
xfs_btnum_t btnum);
void xchk_ag_btree_del_cursor_if_sick(struct xfs_scrub *sc,
struct xfs_btree_cur **curp, unsigned int sm_type);
void xchk_mark_healthy_if_clean(struct xfs_scrub *sc, unsigned int mask);
bool xchk_file_looks_zapped(struct xfs_scrub *sc, unsigned int mask);
int xchk_health_record(struct xfs_scrub *sc);
......
......@@ -76,7 +76,7 @@ xchk_inobt_xref_finobt(
int has_record;
int error;
ASSERT(cur->bc_btnum == XFS_BTNUM_FINO);
ASSERT(xfs_btree_is_fino(cur->bc_ops));
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
if (error)
......@@ -179,7 +179,7 @@ xchk_finobt_xref_inobt(
int has_record;
int error;
ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
ASSERT(xfs_btree_is_ino(cur->bc_ops));
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
if (error)
......@@ -514,7 +514,7 @@ xchk_iallocbt_rec_alignment(
* Otherwise, we expect that the finobt record is aligned to the
* cluster alignment as told by the superblock.
*/
if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
if (xfs_btree_is_fino(bs->cur->bc_ops)) {
unsigned int imask;
imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
......@@ -649,8 +649,7 @@ xchk_iallocbt_rec(
*/
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
struct xfs_scrub *sc,
int which)
struct xfs_scrub *sc)
{
xfs_filblks_t blocks;
xfs_extlen_t inobt_blocks = 0;
......@@ -688,7 +687,6 @@ xchk_iallocbt_xref_rmap_btreeblks(
STATIC void
xchk_iallocbt_xref_rmap_inodes(
struct xfs_scrub *sc,
int which,
unsigned long long inodes)
{
xfs_filblks_t blocks;
......@@ -719,17 +717,14 @@ xchk_iallocbt(
.next_startino = NULLAGINO,
.next_cluster_ino = NULLAGINO,
};
xfs_btnum_t which;
int error;
switch (sc->sm->sm_type) {
case XFS_SCRUB_TYPE_INOBT:
cur = sc->sa.ino_cur;
which = XFS_BTNUM_INO;
break;
case XFS_SCRUB_TYPE_FINOBT:
cur = sc->sa.fino_cur;
which = XFS_BTNUM_FINO;
break;
default:
ASSERT(0);
......@@ -741,7 +736,7 @@ xchk_iallocbt(
if (error)
return error;
xchk_iallocbt_xref_rmap_btreeblks(sc, which);
xchk_iallocbt_xref_rmap_btreeblks(sc);
/*
* If we're scrubbing the inode btree, inode_blocks is the number of
......@@ -750,9 +745,8 @@ xchk_iallocbt(
* knows about. We can't do this for the finobt since it only points
* to inode chunks with free inodes.
*/
if (which == XFS_BTNUM_INO)
xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);
if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
xchk_iallocbt_xref_rmap_inodes(sc, iabt.inodes);
return error;
}
......
......@@ -663,8 +663,8 @@ xrep_ibt_build_new_trees(
ri->new_inobt.bload.claim_block = xrep_ibt_claim_block;
ri->new_inobt.bload.get_records = xrep_ibt_get_records;
ino_cur = xfs_inobt_stage_cursor(sc->sa.pag, &ri->new_inobt.afake,
XFS_BTNUM_INO);
ino_cur = xfs_inobt_init_cursor(sc->sa.pag, NULL, NULL);
xfs_btree_stage_afakeroot(ino_cur, &ri->new_inobt.afake);
error = xfs_btree_bload_compute_geometry(ino_cur, &ri->new_inobt.bload,
xfarray_length(ri->inode_records));
if (error)
......@@ -684,8 +684,8 @@ xrep_ibt_build_new_trees(
ri->new_finobt.bload.claim_block = xrep_fibt_claim_block;
ri->new_finobt.bload.get_records = xrep_fibt_get_records;
fino_cur = xfs_inobt_stage_cursor(sc->sa.pag,
&ri->new_finobt.afake, XFS_BTNUM_FINO);
fino_cur = xfs_finobt_init_cursor(sc->sa.pag, NULL, NULL);
xfs_btree_stage_afakeroot(fino_cur, &ri->new_finobt.afake);
error = xfs_btree_bload_compute_geometry(fino_cur,
&ri->new_finobt.bload, ri->finobt_recs);
if (error)
......
......@@ -113,7 +113,7 @@ xchk_iscan_find_next(
* Look up the inode chunk for the current cursor position. If there
* is no chunk here, we want the next one.
*/
cur = xfs_inobt_init_cursor(pag, tp, agi_bp, XFS_BTNUM_INO);
cur = xfs_inobt_init_cursor(pag, tp, agi_bp);
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_rec);
if (!error && !has_rec)
error = xfs_btree_increment(cur, 0, &has_rec);
......
......@@ -658,8 +658,8 @@ xrep_refc_build_new_tree(
rr->new_btree.bload.claim_block = xrep_refc_claim_block;
/* Compute how many blocks we'll need. */
refc_cur = xfs_refcountbt_stage_cursor(sc->mp, &rr->new_btree.afake,
pag);
refc_cur = xfs_refcountbt_init_cursor(sc->mp, NULL, NULL, pag);
xfs_btree_stage_afakeroot(refc_cur, &rr->new_btree.afake);
error = xfs_btree_bload_compute_geometry(refc_cur,
&rr->new_btree.bload,
xfarray_length(rr->refcount_records));
......
......@@ -832,20 +832,20 @@ xrep_ag_btcur_init(
/* Set up a bnobt cursor for cross-referencing. */
if (sc->sm->sm_type != XFS_SCRUB_TYPE_BNOBT &&
sc->sm->sm_type != XFS_SCRUB_TYPE_CNTBT) {
sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
sc->sa.pag, XFS_BTNUM_BNO);
sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
sc->sa.pag, XFS_BTNUM_CNT);
sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
sc->sa.pag);
sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
sc->sa.pag);
}
/* Set up a inobt cursor for cross-referencing. */
if (sc->sm->sm_type != XFS_SCRUB_TYPE_INOBT &&
sc->sm->sm_type != XFS_SCRUB_TYPE_FINOBT) {
sa->ino_cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp,
sa->agi_bp, XFS_BTNUM_INO);
sa->agi_bp);
if (xfs_has_finobt(mp))
sa->fino_cur = xfs_inobt_init_cursor(sc->sa.pag,
sc->tp, sa->agi_bp, XFS_BTNUM_FINO);
sa->fino_cur = xfs_finobt_init_cursor(sc->sa.pag,
sc->tp, sa->agi_bp);
}
/* Set up a rmapbt cursor for cross-referencing. */
......
......@@ -412,8 +412,8 @@ xchk_rmapbt_walk_ag_metadata(
/* OWN_AG: bnobt, cntbt, rmapbt, and AGFL */
cur = sc->sa.bno_cur;
if (!cur)
cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
sc->sa.pag, XFS_BTNUM_BNO);
cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
sc->sa.pag);
error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
if (cur != sc->sa.bno_cur)
xfs_btree_del_cursor(cur, error);
......@@ -422,8 +422,8 @@ xchk_rmapbt_walk_ag_metadata(
cur = sc->sa.cnt_cur;
if (!cur)
cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
sc->sa.pag, XFS_BTNUM_CNT);
cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
sc->sa.pag);
error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
if (cur != sc->sa.cnt_cur)
xfs_btree_del_cursor(cur, error);
......@@ -447,8 +447,7 @@ xchk_rmapbt_walk_ag_metadata(
/* OWN_INOBT: inobt, finobt */
cur = sc->sa.ino_cur;
if (!cur)
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp,
XFS_BTNUM_INO);
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp);
error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
if (cur != sc->sa.ino_cur)
xfs_btree_del_cursor(cur, error);
......@@ -458,8 +457,8 @@ xchk_rmapbt_walk_ag_metadata(
if (xfs_has_finobt(sc->mp)) {
cur = sc->sa.fino_cur;
if (!cur)
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp,
sc->sa.agi_bp, XFS_BTNUM_FINO);
cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp,
sc->sa.agi_bp);
error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
if (cur != sc->sa.fino_cur)
xfs_btree_del_cursor(cur, error);
......
......@@ -32,14 +32,6 @@ struct xchk_fscounters;
* ring buffer. Somehow this was only worth mentioning in the ftrace sample
* code.
*/
TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi);
TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi);
TRACE_DEFINE_ENUM(XFS_BTNUM_INOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi);
TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW);
......@@ -459,7 +451,7 @@ TRACE_EVENT(xchk_btree_op_error,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, type)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(int, level)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
......@@ -472,7 +464,7 @@ TRACE_EVENT(xchk_btree_op_error,
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
......@@ -480,10 +472,10 @@ TRACE_EVENT(xchk_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
TP_printk("dev %d:%d type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x error %d ret_ip %pS",
TP_printk("dev %d:%d type %s %sbt level %d ptr %d agno 0x%x agbno 0x%x error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->ptr,
__entry->agno,
......@@ -501,7 +493,7 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
__field(xfs_ino_t, ino)
__field(int, whichfork)
__field(unsigned int, type)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(int, level)
__field(int, ptr)
__field(xfs_agnumber_t, agno)
......@@ -515,7 +507,7 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
__entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_ino.whichfork;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->ptr = cur->bc_levels[level].ptr;
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
......@@ -523,12 +515,12 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
TP_printk("dev %d:%d ino 0x%llx fork %s type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x error %d ret_ip %pS",
TP_printk("dev %d:%d ino 0x%llx fork %s type %s %sbt level %d ptr %d agno 0x%x agbno 0x%x error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
__print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->ptr,
__entry->agno,
......@@ -544,7 +536,7 @@ TRACE_EVENT(xchk_btree_error,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, type)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(int, level)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
......@@ -555,17 +547,17 @@ TRACE_EVENT(xchk_btree_error,
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
__entry->ptr = cur->bc_levels[level].ptr;
__entry->ret_ip = ret_ip;
),
TP_printk("dev %d:%d type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x ret_ip %pS",
TP_printk("dev %d:%d type %s %sbt level %d ptr %d agno 0x%x agbno 0x%x ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->ptr,
__entry->agno,
......@@ -582,7 +574,7 @@ TRACE_EVENT(xchk_ifork_btree_error,
__field(xfs_ino_t, ino)
__field(int, whichfork)
__field(unsigned int, type)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(int, level)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
......@@ -595,19 +587,19 @@ TRACE_EVENT(xchk_ifork_btree_error,
__entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_ino.whichfork;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
__entry->ptr = cur->bc_levels[level].ptr;
__entry->ret_ip = ret_ip;
),
TP_printk("dev %d:%d ino 0x%llx fork %s type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x ret_ip %pS",
TP_printk("dev %d:%d ino 0x%llx fork %s type %s %sbt level %d ptr %d agno 0x%x agbno 0x%x ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
__print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->ptr,
__entry->agno,
......@@ -622,7 +614,7 @@ DECLARE_EVENT_CLASS(xchk_sbtree_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(int, type)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
__field(int, level)
......@@ -634,17 +626,17 @@ DECLARE_EVENT_CLASS(xchk_sbtree_class,
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
__entry->level = level;
__entry->nlevels = cur->bc_nlevels;
__entry->ptr = cur->bc_levels[level].ptr;
),
TP_printk("dev %d:%d type %s btree %s agno 0x%x agbno 0x%x level %d nlevels %d ptr %d",
TP_printk("dev %d:%d type %s %sbt agno 0x%x agbno 0x%x level %d nlevels %d ptr %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->agno,
__entry->bno,
__entry->level,
......
......@@ -179,7 +179,7 @@ xfs_trim_gather_extents(
if (error)
goto out_trans_cancel;
cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
/*
* Look up the extent length requested in the AGF and start with it.
......
......@@ -763,8 +763,8 @@ xfs_getfsmap_datadev_bnobt_query(
return xfs_getfsmap_datadev_bnobt_helper(*curpp, &key[1], info);
/* Allocate cursor for this AG and query_range it. */
*curpp = xfs_allocbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
info->pag, XFS_BTNUM_BNO);
*curpp = xfs_bnobt_init_cursor(tp->t_mountp, tp, info->agf_bp,
info->pag);
key->ar_startblock = info->low.rm_startblock;
key[1].ar_startblock = info->high.rm_startblock;
return xfs_alloc_query_range(*curpp, key, &key[1],
......
......@@ -526,36 +526,22 @@ void
xfs_btree_mark_sick(
struct xfs_btree_cur *cur)
{
unsigned int mask;
switch (cur->bc_btnum) {
case XFS_BTNUM_BMAP:
xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
switch (cur->bc_ops->type) {
case XFS_BTREE_TYPE_AG:
ASSERT(cur->bc_ops->sick_mask);
xfs_ag_mark_sick(cur->bc_ag.pag, cur->bc_ops->sick_mask);
return;
case XFS_BTNUM_BNO:
mask = XFS_SICK_AG_BNOBT;
break;
case XFS_BTNUM_CNT:
mask = XFS_SICK_AG_CNTBT;
break;
case XFS_BTNUM_INO:
mask = XFS_SICK_AG_INOBT;
break;
case XFS_BTNUM_FINO:
mask = XFS_SICK_AG_FINOBT;
break;
case XFS_BTNUM_RMAP:
mask = XFS_SICK_AG_RMAPBT;
break;
case XFS_BTNUM_REFC:
mask = XFS_SICK_AG_REFCNTBT;
break;
case XFS_BTREE_TYPE_INODE:
if (xfs_btree_is_bmap(cur->bc_ops)) {
xfs_bmap_mark_sick(cur->bc_ino.ip,
cur->bc_ino.whichfork);
return;
}
fallthrough;
default:
ASSERT(0);
return;
}
xfs_ag_mark_sick(cur->bc_ag.pag, mask);
}
/*
......
......@@ -266,9 +266,10 @@ xfs_iwalk_ag_start(
/* Set up a fresh cursor and empty the inobt cache. */
iwag->nr_recs = 0;
error = xfs_inobt_cur(pag, tp, XFS_BTNUM_INO, curpp, agi_bpp);
error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
if (error)
return error;
*curpp = xfs_inobt_init_cursor(pag, tp, *agi_bpp);
/* Starting at the beginning of the AG? That's easy! */
if (agino == 0)
......@@ -383,11 +384,10 @@ xfs_iwalk_run_callbacks(
}
/* ...and recreate the cursor just past where we left off. */
error = xfs_inobt_cur(iwag->pag, iwag->tp, XFS_BTNUM_INO, curpp,
agi_bpp);
error = xfs_ialloc_read_agi(iwag->pag, iwag->tp, agi_bpp);
if (error)
return error;
*curpp = xfs_inobt_init_cursor(iwag->pag, iwag->tp, *agi_bpp);
return xfs_inobt_lookup(*curpp, next_agino, XFS_LOOKUP_GE, has_more);
}
......
......@@ -1710,12 +1710,10 @@ DECLARE_EVENT_CLASS(xfs_agf_class,
__entry->agno = be32_to_cpu(agf->agf_seqno),
__entry->flags = flags;
__entry->length = be32_to_cpu(agf->agf_length),
__entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
__entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
__entry->bno_level =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
__entry->cnt_level =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
__entry->bno_root = be32_to_cpu(agf->agf_bno_root),
__entry->cnt_root = be32_to_cpu(agf->agf_cnt_root),
__entry->bno_level = be32_to_cpu(agf->agf_bno_level),
__entry->cnt_level = be32_to_cpu(agf->agf_cnt_level),
__entry->flfirst = be32_to_cpu(agf->agf_flfirst),
__entry->fllast = be32_to_cpu(agf->agf_fllast),
__entry->flcount = be32_to_cpu(agf->agf_flcount),
......@@ -1890,28 +1888,28 @@ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_near_bno);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_finish);
TRACE_EVENT(xfs_alloc_cur_check,
TP_PROTO(struct xfs_mount *mp, xfs_btnum_t btnum, xfs_agblock_t bno,
TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t bno,
xfs_extlen_t len, xfs_extlen_t diff, bool new),
TP_ARGS(mp, btnum, bno, len, diff, new),
TP_ARGS(cur, bno, len, diff, new),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(xfs_agblock_t, bno)
__field(xfs_extlen_t, len)
__field(xfs_extlen_t, diff)
__field(bool, new)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->btnum = btnum;
__entry->dev = cur->bc_mp->m_super->s_dev;
__assign_str(name, cur->bc_ops->name);
__entry->bno = bno;
__entry->len = len;
__entry->diff = diff;
__entry->new = new;
),
TP_printk("dev %d:%d btree %s agbno 0x%x fsbcount 0x%x diff 0x%x new %d",
TP_printk("dev %d:%d %sbt agbno 0x%x fsbcount 0x%x diff 0x%x new %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->bno, __entry->len, __entry->diff, __entry->new)
)
......@@ -2452,21 +2450,12 @@ DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
/* btree cursor events */
TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi);
TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi);
TRACE_DEFINE_ENUM(XFS_BTNUM_INOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi);
TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi);
DECLARE_EVENT_CLASS(xfs_btree_cur_class,
TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
TP_ARGS(cur, level, bp),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(int, level)
__field(int, nlevels)
__field(int, ptr)
......@@ -2474,15 +2463,15 @@ DECLARE_EVENT_CLASS(xfs_btree_cur_class,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->nlevels = cur->bc_nlevels;
__entry->ptr = cur->bc_levels[level].ptr;
__entry->daddr = bp ? xfs_buf_daddr(bp) : -1;
),
TP_printk("dev %d:%d btree %s level %d/%d ptr %d daddr 0x%llx",
TP_printk("dev %d:%d %sbt level %d/%d ptr %d daddr 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->nlevels,
__entry->ptr,
......@@ -2504,7 +2493,7 @@ TRACE_EVENT(xfs_btree_alloc_block,
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_ino_t, ino)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(int, error)
__field(xfs_agblock_t, agbno)
),
......@@ -2517,7 +2506,7 @@ TRACE_EVENT(xfs_btree_alloc_block,
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->ino = 0;
}
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->error = error;
if (!error && stat) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
......@@ -2534,9 +2523,9 @@ TRACE_EVENT(xfs_btree_alloc_block,
__entry->agbno = NULLAGBLOCK;
}
),
TP_printk("dev %d:%d btree %s agno 0x%x ino 0x%llx agbno 0x%x error %d",
TP_printk("dev %d:%d %sbt agno 0x%x ino 0x%llx agbno 0x%x error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->agno,
__entry->ino,
__entry->agbno,
......@@ -2550,7 +2539,7 @@ TRACE_EVENT(xfs_btree_free_block,
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_ino_t, ino)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
......@@ -2561,13 +2550,13 @@ TRACE_EVENT(xfs_btree_free_block,
__entry->ino = cur->bc_ino.ip->i_ino;
else
__entry->ino = 0;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->agbno = xfs_daddr_to_agbno(cur->bc_mp,
xfs_buf_daddr(bp));
),
TP_printk("dev %d:%d btree %s agno 0x%x ino 0x%llx agbno 0x%x",
TP_printk("dev %d:%d %sbt agno 0x%x ino 0x%llx agbno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->agno,
__entry->ino,
__entry->agbno)
......@@ -4144,7 +4133,7 @@ TRACE_EVENT(xfs_btree_commit_afakeroot,
TP_ARGS(cur),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(unsigned int, levels)
......@@ -4152,15 +4141,15 @@ TRACE_EVENT(xfs_btree_commit_afakeroot,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->agbno = cur->bc_ag.afake->af_root;
__entry->levels = cur->bc_ag.afake->af_levels;
__entry->blocks = cur->bc_ag.afake->af_blocks;
),
TP_printk("dev %d:%d btree %s agno 0x%x levels %u blocks %u root %u",
TP_printk("dev %d:%d %sbt agno 0x%x levels %u blocks %u root %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->agno,
__entry->levels,
__entry->blocks,
......@@ -4172,7 +4161,7 @@ TRACE_EVENT(xfs_btree_commit_ifakeroot,
TP_ARGS(cur),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, agino)
__field(unsigned int, levels)
......@@ -4181,7 +4170,7 @@ TRACE_EVENT(xfs_btree_commit_ifakeroot,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->agno = XFS_INO_TO_AGNO(cur->bc_mp,
cur->bc_ino.ip->i_ino);
__entry->agino = XFS_INO_TO_AGINO(cur->bc_mp,
......@@ -4190,9 +4179,9 @@ TRACE_EVENT(xfs_btree_commit_ifakeroot,
__entry->blocks = cur->bc_ino.ifake->if_blocks;
__entry->whichfork = cur->bc_ino.whichfork;
),
TP_printk("dev %d:%d btree %s agno 0x%x agino 0x%x whichfork %s levels %u blocks %u",
TP_printk("dev %d:%d %sbt agno 0x%x agino 0x%x whichfork %s levels %u blocks %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->agno,
__entry->agino,
__print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
......@@ -4209,7 +4198,7 @@ TRACE_EVENT(xfs_btree_bload_level_geometry,
blocks_with_extra),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(unsigned int, level)
__field(unsigned int, nlevels)
__field(uint64_t, nr_this_level)
......@@ -4220,7 +4209,7 @@ TRACE_EVENT(xfs_btree_bload_level_geometry,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->nlevels = cur->bc_nlevels;
__entry->nr_this_level = nr_this_level;
......@@ -4229,9 +4218,9 @@ TRACE_EVENT(xfs_btree_bload_level_geometry,
__entry->blocks = blocks;
__entry->blocks_with_extra = blocks_with_extra;
),
TP_printk("dev %d:%d btree %s level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu",
TP_printk("dev %d:%d %sbt level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->nlevels,
__entry->nr_this_level,
......@@ -4248,7 +4237,7 @@ TRACE_EVENT(xfs_btree_bload_block,
TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_btnum_t, btnum)
__string(name, cur->bc_ops->name)
__field(unsigned int, level)
__field(unsigned long long, block_idx)
__field(unsigned long long, nr_blocks)
......@@ -4258,7 +4247,7 @@ TRACE_EVENT(xfs_btree_bload_block,
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->btnum = cur->bc_btnum;
__assign_str(name, cur->bc_ops->name);
__entry->level = level;
__entry->block_idx = block_idx;
__entry->nr_blocks = nr_blocks;
......@@ -4273,9 +4262,9 @@ TRACE_EVENT(xfs_btree_bload_block,
}
__entry->nr_records = nr_records;
),
TP_printk("dev %d:%d btree %s level %u block %llu/%llu agno 0x%x agbno 0x%x recs %u",
TP_printk("dev %d:%d %sbt level %u block %llu/%llu agno 0x%x agbno 0x%x recs %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__get_str(name),
__entry->level,
__entry->block_idx,
__entry->nr_blocks,
......