Commit fd9c7f77 authored by Darrick J. Wong

xfs: encode the btree geometry flags in the btree ops structure

Certain btree flags never change for the life of a btree cursor because
they describe the geometry of the btree itself.  Encode these in the
btree ops structure and reduce the amount of code required in each btree
type's init_cursor functions.  This also frees up most of the bits in
bc_flags.

A previous version of this patch also converted the open-coded flags
logic to helpers.  This was removed due to the pending refactoring (that
follows this patch) to eliminate most of the state flags.

Conversion script:

sed \
 -e 's/XFS_BTREE_LONG_PTRS/XFS_BTGEO_LONG_PTRS/g' \
 -e 's/XFS_BTREE_ROOT_IN_INODE/XFS_BTGEO_ROOT_IN_INODE/g' \
 -e 's/XFS_BTREE_LASTREC_UPDATE/XFS_BTGEO_LASTREC_UPDATE/g' \
 -e 's/XFS_BTREE_OVERLAPPING/XFS_BTGEO_OVERLAPPING/g' \
 -e 's/cur->bc_flags & XFS_BTGEO_/cur->bc_ops->geom_flags \& XFS_BTGEO_/g' \
 -i $(git ls-files fs/xfs/*.[ch] fs/xfs/libxfs/*.[ch] fs/xfs/scrub/*.[ch])
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent c0afba9a
...@@ -478,6 +478,8 @@ static const struct xfs_btree_ops xfs_bnobt_ops = { ...@@ -478,6 +478,8 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
}; };
static const struct xfs_btree_ops xfs_cntbt_ops = { static const struct xfs_btree_ops xfs_cntbt_ops = {
.geom_flags = XFS_BTGEO_LASTREC_UPDATE,
.rec_len = sizeof(xfs_alloc_rec_t), .rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t), .key_len = sizeof(xfs_alloc_key_t),
...@@ -516,7 +518,6 @@ xfs_allocbt_init_common( ...@@ -516,7 +518,6 @@ xfs_allocbt_init_common(
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops, cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache); mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
} else { } else {
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops, cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache); mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
...@@ -591,7 +592,6 @@ xfs_allocbt_commit_staged_btree( ...@@ -591,7 +592,6 @@ xfs_allocbt_commit_staged_btree(
if (cur->bc_btnum == XFS_BTNUM_BNO) { if (cur->bc_btnum == XFS_BTNUM_BNO) {
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops); xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
} else { } else {
cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops); xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
} }
} }
......
...@@ -646,7 +646,7 @@ xfs_bmap_extents_to_btree( ...@@ -646,7 +646,7 @@ xfs_bmap_extents_to_btree(
block = ifp->if_broot; block = ifp->if_broot;
xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL, xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
XFS_BTNUM_BMAP, 1, 1, ip->i_ino, XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
XFS_BTREE_LONG_PTRS); XFS_BTGEO_LONG_PTRS);
/* /*
* Need a cursor. Can't allocate until bb_level is filled in. * Need a cursor. Can't allocate until bb_level is filled in.
*/ */
...@@ -693,7 +693,7 @@ xfs_bmap_extents_to_btree( ...@@ -693,7 +693,7 @@ xfs_bmap_extents_to_btree(
ablock = XFS_BUF_TO_BLOCK(abp); ablock = XFS_BUF_TO_BLOCK(abp);
xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp), xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
XFS_BTNUM_BMAP, 0, 0, ip->i_ino, XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS); XFS_BTGEO_LONG_PTRS);
for_each_xfs_iext(ifp, &icur, &rec) { for_each_xfs_iext(ifp, &icur, &rec) {
if (isnullstartblock(rec.br_startblock)) if (isnullstartblock(rec.br_startblock))
......
...@@ -46,7 +46,7 @@ xfs_bmdr_to_bmbt( ...@@ -46,7 +46,7 @@ xfs_bmdr_to_bmbt(
xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL, xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
XFS_BTNUM_BMAP, 0, 0, ip->i_ino, XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS); XFS_BTGEO_LONG_PTRS);
rblock->bb_level = dblock->bb_level; rblock->bb_level = dblock->bb_level;
ASSERT(be16_to_cpu(rblock->bb_level) > 0); ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs; rblock->bb_numrecs = dblock->bb_numrecs;
...@@ -516,6 +516,8 @@ xfs_bmbt_keys_contiguous( ...@@ -516,6 +516,8 @@ xfs_bmbt_keys_contiguous(
} }
static const struct xfs_btree_ops xfs_bmbt_ops = { static const struct xfs_btree_ops xfs_bmbt_ops = {
.geom_flags = XFS_BTGEO_LONG_PTRS | XFS_BTGEO_ROOT_IN_INODE,
.rec_len = sizeof(xfs_bmbt_rec_t), .rec_len = sizeof(xfs_bmbt_rec_t),
.key_len = sizeof(xfs_bmbt_key_t), .key_len = sizeof(xfs_bmbt_key_t),
...@@ -553,8 +555,6 @@ xfs_bmbt_init_common( ...@@ -553,8 +555,6 @@ xfs_bmbt_init_common(
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache); mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
cur->bc_ino.ip = ip; cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0; cur->bc_ino.allocated = 0;
cur->bc_ino.flags = 0; cur->bc_ino.flags = 0;
......
This diff is collapsed.
...@@ -112,6 +112,9 @@ static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y) ...@@ -112,6 +112,9 @@ static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
} }
struct xfs_btree_ops { struct xfs_btree_ops {
/* XFS_BTGEO_* flags that determine the geometry of the btree */
unsigned int geom_flags;
/* size of the key and record structures */ /* size of the key and record structures */
size_t key_len; size_t key_len;
size_t rec_len; size_t rec_len;
...@@ -199,6 +202,12 @@ struct xfs_btree_ops { ...@@ -199,6 +202,12 @@ struct xfs_btree_ops {
const union xfs_btree_key *mask); const union xfs_btree_key *mask);
}; };
/* btree geometry flags */
#define XFS_BTGEO_LONG_PTRS (1U << 0) /* pointers are 64bits long */
#define XFS_BTGEO_ROOT_IN_INODE (1U << 1) /* root may be variable size */
#define XFS_BTGEO_LASTREC_UPDATE (1U << 2) /* track last rec externally */
#define XFS_BTGEO_OVERLAPPING (1U << 3) /* overlapping intervals */
/* /*
* Reasons for the update_lastrec method to be called. * Reasons for the update_lastrec method to be called.
*/ */
...@@ -281,7 +290,7 @@ struct xfs_btree_cur ...@@ -281,7 +290,7 @@ struct xfs_btree_cur
/* /*
* Short btree pointers need an agno to be able to turn the pointers * Short btree pointers need an agno to be able to turn the pointers
* into physical addresses for IO, so the btree cursor switches between * into physical addresses for IO, so the btree cursor switches between
* bc_ino and bc_ag based on whether XFS_BTREE_LONG_PTRS is set for the * bc_ino and bc_ag based on whether XFS_BTGEO_LONG_PTRS is set for the
* cursor. * cursor.
*/ */
union { union {
...@@ -304,17 +313,13 @@ xfs_btree_cur_sizeof(unsigned int nlevels) ...@@ -304,17 +313,13 @@ xfs_btree_cur_sizeof(unsigned int nlevels)
return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels); return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
} }
/* cursor flags */ /* cursor state flags */
#define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */
#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
/* /*
* The root of this btree is a fakeroot structure so that we can stage a btree * The root of this btree is a fakeroot structure so that we can stage a btree
* rebuild without leaving it accessible via primary metadata. The ops struct * rebuild without leaving it accessible via primary metadata. The ops struct
* is dynamically allocated and must be freed when the cursor is deleted. * is dynamically allocated and must be freed when the cursor is deleted.
*/ */
#define XFS_BTREE_STAGING (1<<5) #define XFS_BTREE_STAGING (1U << 0)
#define XFS_BTREE_NOERROR 0 #define XFS_BTREE_NOERROR 0
#define XFS_BTREE_ERROR 1 #define XFS_BTREE_ERROR 1
...@@ -447,7 +452,7 @@ xfs_btree_init_block_int( ...@@ -447,7 +452,7 @@ xfs_btree_init_block_int(
__u16 level, __u16 level,
__u16 numrecs, __u16 numrecs,
__u64 owner, __u64 owner,
unsigned int flags); unsigned int geom_flags);
/* /*
* Common btree core entry points. * Common btree core entry points.
...@@ -689,7 +694,7 @@ xfs_btree_islastblock( ...@@ -689,7 +694,7 @@ xfs_btree_islastblock(
block = xfs_btree_get_block(cur, level, &bp); block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) if (cur->bc_ops->geom_flags & XFS_BTGEO_LONG_PTRS)
return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK); return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
} }
......
...@@ -136,7 +136,7 @@ xfs_btree_stage_afakeroot( ...@@ -136,7 +136,7 @@ xfs_btree_stage_afakeroot(
struct xfs_btree_ops *nops; struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING)); ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)); ASSERT(!(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE));
ASSERT(cur->bc_tp == NULL); ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL); nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
...@@ -217,7 +217,7 @@ xfs_btree_stage_ifakeroot( ...@@ -217,7 +217,7 @@ xfs_btree_stage_ifakeroot(
struct xfs_btree_ops *nops; struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING)); ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE);
ASSERT(cur->bc_tp == NULL); ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL); nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
...@@ -397,7 +397,7 @@ xfs_btree_bload_prep_block( ...@@ -397,7 +397,7 @@ xfs_btree_bload_prep_block(
struct xfs_btree_block *new_block; struct xfs_btree_block *new_block;
int ret; int ret;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) { level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
size_t new_size; size_t new_size;
...@@ -413,7 +413,7 @@ xfs_btree_bload_prep_block( ...@@ -413,7 +413,7 @@ xfs_btree_bload_prep_block(
xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot, xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
XFS_BUF_DADDR_NULL, cur->bc_btnum, level, XFS_BUF_DADDR_NULL, cur->bc_btnum, level,
nr_this_block, cur->bc_ino.ip->i_ino, nr_this_block, cur->bc_ino.ip->i_ino,
cur->bc_flags); cur->bc_ops->geom_flags);
*bpp = NULL; *bpp = NULL;
*blockp = ifp->if_broot; *blockp = ifp->if_broot;
...@@ -704,7 +704,7 @@ xfs_btree_bload_compute_geometry( ...@@ -704,7 +704,7 @@ xfs_btree_bload_compute_geometry(
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level, xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
&avg_per_block, &level_blocks, &dontcare64); &avg_per_block, &level_blocks, &dontcare64);
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
/* /*
* If all the items we want to store at this level * If all the items we want to store at this level
* would fit in the inode root block, then we have our * would fit in the inode root block, then we have our
...@@ -763,7 +763,7 @@ xfs_btree_bload_compute_geometry( ...@@ -763,7 +763,7 @@ xfs_btree_bload_compute_geometry(
return -EOVERFLOW; return -EOVERFLOW;
bbl->btree_height = cur->bc_nlevels; bbl->btree_height = cur->bc_nlevels;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
bbl->nr_blocks = nr_blocks - 1; bbl->nr_blocks = nr_blocks - 1;
else else
bbl->nr_blocks = nr_blocks; bbl->nr_blocks = nr_blocks;
...@@ -890,7 +890,7 @@ xfs_btree_bload( ...@@ -890,7 +890,7 @@ xfs_btree_bload(
} }
/* Initialize the new root. */ /* Initialize the new root. */
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
cur->bc_ino.ifake->if_levels = cur->bc_nlevels; cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
cur->bc_ino.ifake->if_blocks = total_blocks - 1; cur->bc_ino.ifake->if_blocks = total_blocks - 1;
......
...@@ -76,7 +76,7 @@ struct xfs_btree_bload { ...@@ -76,7 +76,7 @@ struct xfs_btree_bload {
/* /*
* This function should return the size of the in-core btree root * This function should return the size of the in-core btree root
* block. It is only necessary for XFS_BTREE_ROOT_IN_INODE btree * block. It is only necessary for XFS_BTGEO_ROOT_IN_INODE btree
* types. * types.
*/ */
xfs_btree_bload_iroot_size_fn iroot_size; xfs_btree_bload_iroot_size_fn iroot_size;
......
...@@ -473,6 +473,8 @@ xfs_rmapbt_keys_contiguous( ...@@ -473,6 +473,8 @@ xfs_rmapbt_keys_contiguous(
} }
static const struct xfs_btree_ops xfs_rmapbt_ops = { static const struct xfs_btree_ops xfs_rmapbt_ops = {
.geom_flags = XFS_BTGEO_OVERLAPPING,
.rec_len = sizeof(struct xfs_rmap_rec), .rec_len = sizeof(struct xfs_rmap_rec),
.key_len = 2 * sizeof(struct xfs_rmap_key), .key_len = 2 * sizeof(struct xfs_rmap_key),
...@@ -505,7 +507,6 @@ xfs_rmapbt_init_common( ...@@ -505,7 +507,6 @@ xfs_rmapbt_init_common(
/* Overlapping btree; 2 keys per pointer. */ /* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops, cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache); mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_flags = XFS_BTREE_OVERLAPPING;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
......
...@@ -47,7 +47,7 @@ __xchk_btree_process_error( ...@@ -47,7 +47,7 @@ __xchk_btree_process_error(
*error = 0; *error = 0;
fallthrough; fallthrough;
default: default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
trace_xchk_ifork_btree_op_error(sc, cur, level, trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip); *error, ret_ip);
else else
...@@ -91,7 +91,7 @@ __xchk_btree_set_corrupt( ...@@ -91,7 +91,7 @@ __xchk_btree_set_corrupt(
{ {
sc->sm->sm_flags |= errflag; sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
trace_xchk_ifork_btree_error(sc, cur, level, trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip); ret_ip);
else else
...@@ -168,7 +168,7 @@ xchk_btree_rec( ...@@ -168,7 +168,7 @@ xchk_btree_rec(
if (xfs_btree_keycmp_lt(cur, &key, keyp)) if (xfs_btree_keycmp_lt(cur, &key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return; return;
/* Is high_key(rec) no larger than the parent high key? */ /* Is high_key(rec) no larger than the parent high key? */
...@@ -215,7 +215,7 @@ xchk_btree_key( ...@@ -215,7 +215,7 @@ xchk_btree_key(
if (xfs_btree_keycmp_lt(cur, key, keyp)) if (xfs_btree_keycmp_lt(cur, key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return; return;
/* Is this block's high key no larger than the parent high key? */ /* Is this block's high key no larger than the parent high key? */
...@@ -239,12 +239,12 @@ xchk_btree_ptr_ok( ...@@ -239,12 +239,12 @@ xchk_btree_ptr_ok(
bool res; bool res;
/* A btree rooted in an inode has no block pointer to the root. */ /* A btree rooted in an inode has no block pointer to the root. */
if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && if ((bs->cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
level == bs->cur->bc_nlevels) level == bs->cur->bc_nlevels)
return true; return true;
/* Otherwise, check the pointers. */ /* Otherwise, check the pointers. */
if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS) if (bs->cur->bc_ops->geom_flags & XFS_BTGEO_LONG_PTRS)
res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level); res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
else else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level); res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
...@@ -390,7 +390,7 @@ xchk_btree_check_block_owner( ...@@ -390,7 +390,7 @@ xchk_btree_check_block_owner(
* sc->sa so that we can check for the presence of an ownership record * sc->sa so that we can check for the presence of an ownership record
* in the rmap btree for the AG containing the block. * in the rmap btree for the AG containing the block.
*/ */
init_sa = bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE; init_sa = bs->cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE;
if (init_sa) { if (init_sa) {
error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa); error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa);
if (!xchk_btree_xref_process_error(bs->sc, bs->cur, if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
...@@ -434,7 +434,7 @@ xchk_btree_check_owner( ...@@ -434,7 +434,7 @@ xchk_btree_check_owner(
* up. * up.
*/ */
if (bp == NULL) { if (bp == NULL) {
if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)) if (!(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE))
xchk_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0; return 0;
} }
...@@ -513,7 +513,7 @@ xchk_btree_check_minrecs( ...@@ -513,7 +513,7 @@ xchk_btree_check_minrecs(
* child block might be less than the standard minrecs, but that's ok * child block might be less than the standard minrecs, but that's ok
* provided that there's only one direct child of the root. * provided that there's only one direct child of the root.
*/ */
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 2) { level == cur->bc_nlevels - 2) {
struct xfs_btree_block *root_block; struct xfs_btree_block *root_block;
struct xfs_buf *root_bp; struct xfs_buf *root_bp;
...@@ -567,7 +567,7 @@ xchk_btree_block_check_keys( ...@@ -567,7 +567,7 @@ xchk_btree_block_check_keys(
return; return;
} }
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return; return;
/* Make sure the high key of this block matches the parent. */ /* Make sure the high key of this block matches the parent. */
...@@ -602,7 +602,7 @@ xchk_btree_get_block( ...@@ -602,7 +602,7 @@ xchk_btree_get_block(
return error; return error;
xfs_btree_get_block(bs->cur, level, pbp); xfs_btree_get_block(bs->cur, level, pbp);
if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS) if (bs->cur->bc_ops->geom_flags & XFS_BTGEO_LONG_PTRS)
failed_at = __xfs_btree_check_lblock(bs->cur, *pblock, failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
level, *pbp); level, *pbp);
else else
...@@ -669,7 +669,7 @@ xchk_btree_block_keys( ...@@ -669,7 +669,7 @@ xchk_btree_block_keys(
if (xfs_btree_keycmp_ne(cur, &block_keys, parent_keys)) if (xfs_btree_keycmp_ne(cur, &block_keys, parent_keys))
xchk_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return; return;
/* Get high keys */ /* Get high keys */
......
...@@ -535,7 +535,7 @@ xrep_newbt_claim_block( ...@@ -535,7 +535,7 @@ xrep_newbt_claim_block(
trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1, trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1,
xnr->oinfo.oi_owner); xnr->oinfo.oi_owner);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) if (cur->bc_ops->geom_flags & XFS_BTGEO_LONG_PTRS)
ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno, ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
agbno)); agbno));
else else
......
...@@ -37,7 +37,7 @@ xchk_btree_cur_fsbno( ...@@ -37,7 +37,7 @@ xchk_btree_cur_fsbno(
xfs_buf_daddr(cur->bc_levels[level].bp)); xfs_buf_daddr(cur->bc_levels[level].bp));
if (level == cur->bc_nlevels - 1 && if (level == cur->bc_nlevels - 1 &&
(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)) (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE))
return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino); return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
return NULLFSBLOCK; return NULLFSBLOCK;
......
...@@ -2510,7 +2510,7 @@ TRACE_EVENT(xfs_btree_alloc_block, ...@@ -2510,7 +2510,7 @@ TRACE_EVENT(xfs_btree_alloc_block,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev; __entry->dev = cur->bc_mp->m_super->s_dev;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
__entry->agno = 0; __entry->agno = 0;
__entry->ino = cur->bc_ino.ip->i_ino; __entry->ino = cur->bc_ino.ip->i_ino;
} else { } else {
...@@ -2520,7 +2520,7 @@ TRACE_EVENT(xfs_btree_alloc_block, ...@@ -2520,7 +2520,7 @@ TRACE_EVENT(xfs_btree_alloc_block,
__entry->btnum = cur->bc_btnum; __entry->btnum = cur->bc_btnum;
__entry->error = error; __entry->error = error;
if (!error && stat) { if (!error && stat) {
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { if (cur->bc_ops->geom_flags & XFS_BTGEO_LONG_PTRS) {
xfs_fsblock_t fsb = be64_to_cpu(ptr->l); xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp,
...@@ -2557,7 +2557,7 @@ TRACE_EVENT(xfs_btree_free_block, ...@@ -2557,7 +2557,7 @@ TRACE_EVENT(xfs_btree_free_block,
__entry->dev = cur->bc_mp->m_super->s_dev; __entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = xfs_daddr_to_agno(cur->bc_mp, __entry->agno = xfs_daddr_to_agno(cur->bc_mp,
xfs_buf_daddr(bp)); xfs_buf_daddr(bp));
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
__entry->ino = cur->bc_ino.ip->i_ino; __entry->ino = cur->bc_ino.ip->i_ino;
else else
__entry->ino = 0; __entry->ino = 0;
...@@ -4262,7 +4262,7 @@ TRACE_EVENT(xfs_btree_bload_block, ...@@ -4262,7 +4262,7 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->level = level; __entry->level = level;
__entry->block_idx = block_idx; __entry->block_idx = block_idx;
__entry->nr_blocks = nr_blocks; __entry->nr_blocks = nr_blocks;
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { if (cur->bc_ops->geom_flags & XFS_BTGEO_LONG_PTRS) {
xfs_fsblock_t fsb = be64_to_cpu(ptr->l); xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb); __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment