Commit 4f0cd5a5 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: split out a btree type from the btree ops geometry flags

Two of the btree cursor flags are always used together and encode
the fundamental btree type.  There currently are two such types:

 1) an on-disk AG-rooted btree with 32-bit pointers
 2) an on-disk inode-rooted btree with 64-bit pointers

and we're about to add:

 3) an in-memory btree with 64-bit pointers

Introduce a new enum and a new type field in struct xfs_btree_ops
to encode this type directly instead of using flags, and change most
code to switch on this enum.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
[djwong: make the pointer lengths explicit]
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parent 1a9d2629
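Condensed sketch of what the change looks like once applied. The enum, the new ops field, and the switch-based dispatch are lifted from the hunks below; the stubbed struct body and the example_release_cursor_refs() helper are illustrative placeholders, not kernel code:

/* Illustrative sketch only -- types are stubbed, not the real kernel headers. */
enum xfs_btree_type {
        XFS_BTREE_TYPE_AG,              /* on-disk, AG-rooted, 32-bit pointers */
        XFS_BTREE_TYPE_INODE,           /* on-disk, inode-rooted, 64-bit pointers */
};

struct xfs_btree_ops {
        /* Type of btree - AG-rooted or inode-rooted */
        enum xfs_btree_type     type;

        /* XFS_BTGEO_* flags; XFS_BTGEO_ROOT_IN_INODE goes away */
        unsigned int            geom_flags;

        /* record/key/pointer sizes and method pointers elided */
};

/*
 * Call sites stop testing XFS_BTGEO_ROOT_IN_INODE and instead compare or
 * switch on the type, as xfs_btree_del_cursor() does below (hypothetical
 * helper name, same control flow):
 */
static void example_release_cursor_refs(const struct xfs_btree_ops *ops)
{
        switch (ops->type) {
        case XFS_BTREE_TYPE_AG:
                /* drop the per-AG reference the cursor held */
                break;
        case XFS_BTREE_TYPE_INODE:
                /* nothing to do */
                break;
        }
}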
@@ -455,6 +455,8 @@ xfs_allocbt_keys_contiguous(
 }
 const struct xfs_btree_ops xfs_bnobt_ops = {
+       .type = XFS_BTREE_TYPE_AG,
        .rec_len = sizeof(xfs_alloc_rec_t),
        .key_len = sizeof(xfs_alloc_key_t),
        .ptr_len = XFS_BTREE_SHORT_PTR_LEN,
@@ -482,6 +484,7 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
 };
 const struct xfs_btree_ops xfs_cntbt_ops = {
+       .type = XFS_BTREE_TYPE_AG,
        .geom_flags = XFS_BTGEO_LASTREC_UPDATE,
        .rec_len = sizeof(xfs_alloc_rec_t),
......
@@ -525,7 +525,7 @@ xfs_bmbt_keys_contiguous(
 }
 const struct xfs_btree_ops xfs_bmbt_ops = {
-       .geom_flags = XFS_BTGEO_ROOT_IN_INODE,
+       .type = XFS_BTREE_TYPE_INODE,
        .rec_len = sizeof(xfs_bmbt_rec_t),
        .key_len = sizeof(xfs_bmbt_key_t),
......
@@ -447,10 +447,19 @@ xfs_btree_del_cursor(
         */
        ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
               xfs_is_shutdown(cur->bc_mp) || error != 0);
+       switch (cur->bc_ops->type) {
+       case XFS_BTREE_TYPE_AG:
+               if (cur->bc_ag.pag)
+                       xfs_perag_put(cur->bc_ag.pag);
+               break;
+       case XFS_BTREE_TYPE_INODE:
+               /* nothing to do */
+               break;
+       }
        if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
                kfree(cur->bc_ops);
-       if (!(cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) && cur->bc_ag.pag)
-               xfs_perag_put(cur->bc_ag.pag);
        kmem_cache_free(cur->bc_cache, cur);
 }
@@ -708,7 +717,7 @@ struct xfs_ifork *
 xfs_btree_ifork_ptr(
        struct xfs_btree_cur    *cur)
 {
-       ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE);
+       ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
        if (cur->bc_flags & XFS_BTREE_STAGING)
                return cur->bc_ino.ifake->if_fork;
@@ -740,8 +749,8 @@ xfs_btree_get_block(
        int                     level,  /* level in btree */
        struct xfs_buf          **bpp)  /* buffer containing the block */
 {
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
-           (level == cur->bc_nlevels - 1)) {
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
+           level == cur->bc_nlevels - 1) {
                *bpp = NULL;
                return xfs_btree_get_iroot(cur);
        }
@@ -983,8 +992,8 @@ xfs_btree_readahead(
         * No readahead needed if we are at the root level and the
         * btree root is stored in the inode.
         */
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
-           (lev == cur->bc_nlevels - 1))
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
+           lev == cur->bc_nlevels - 1)
                return 0;
        if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra)
@@ -1172,14 +1181,12 @@ __xfs_btree_init_block(
                        buf->bb_u.l.bb_lsn = 0;
                }
        } else {
-               /* owner is a 32 bit value on short blocks */
-               __u32 __owner = (__u32)owner;
                buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
                buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
                if (crc) {
                        buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
-                       buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
+                       /* owner is a 32 bit value on short blocks */
+                       buf->bb_u.s.bb_owner = cpu_to_be32((__u32)owner);
                        uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
                        buf->bb_u.s.bb_lsn = 0;
                }
@@ -1217,7 +1224,7 @@ static inline __u64
 xfs_btree_owner(
        struct xfs_btree_cur    *cur)
 {
-       if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                return cur->bc_ino.ip->i_ino;
        return cur->bc_ag.pag->pag_agno;
 }
@@ -1638,7 +1645,7 @@ xfs_btree_increment(
         * confused or have the tree root in an inode.
         */
        if (lev == cur->bc_nlevels) {
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                        goto out0;
                ASSERT(0);
                xfs_btree_mark_sick(cur);
@@ -1732,7 +1739,7 @@ xfs_btree_decrement(
         * or the root of the tree is in an inode.
         */
        if (lev == cur->bc_nlevels) {
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                        goto out0;
                ASSERT(0);
                xfs_btree_mark_sick(cur);
@@ -1807,8 +1814,8 @@ xfs_btree_lookup_get_block(
        int                     error = 0;
        /* special case the root block if in an inode */
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
-           (level == cur->bc_nlevels - 1)) {
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
+           level == cur->bc_nlevels - 1) {
                *blkp = xfs_btree_get_iroot(cur);
                return 0;
        }
@@ -2343,7 +2350,7 @@ xfs_btree_lshift(
        int                     error;  /* error return value */
        int                     i;
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
+       if ((cur->bc_ops->type == XFS_BTREE_TYPE_INODE) &&
            level == cur->bc_nlevels - 1)
                goto out0;
@@ -2539,8 +2546,8 @@ xfs_btree_rshift(
        int                     error;  /* error return value */
        int                     i;      /* loop counter */
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
-           (level == cur->bc_nlevels - 1))
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
+           level == cur->bc_nlevels - 1)
                goto out0;
        /* Set up variables for this block as "left". */
@@ -2990,7 +2997,6 @@ xfs_btree_split(
 #define xfs_btree_split        __xfs_btree_split
 #endif /* __KERNEL__ */
 /*
  * Copy the old inode root contents into a real block and make the
  * broot point to it.
@@ -3015,7 +3021,7 @@ xfs_btree_new_iroot(
        XFS_BTREE_STATS_INC(cur, newroot);
-       ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE);
+       ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
        level = cur->bc_nlevels - 1;
@@ -3240,7 +3246,7 @@ xfs_btree_make_block_unfull(
 {
        int                     error = 0;
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
            level == cur->bc_nlevels - 1) {
                struct xfs_inode        *ip = cur->bc_ino.ip;
@@ -3326,8 +3332,8 @@ xfs_btree_insrec(
         * If we have an external root pointer, and we've made it to the
         * root level, allocate a new root block and we're done.
         */
-       if (!(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
-           (level >= cur->bc_nlevels)) {
+       if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE &&
+           level >= cur->bc_nlevels) {
                error = xfs_btree_new_root(cur, stat);
                xfs_btree_set_ptr_null(cur, ptrp);
@@ -3614,7 +3620,7 @@ xfs_btree_kill_iroot(
 #endif
        int                     i;
-       ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE);
+       ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
        ASSERT(cur->bc_nlevels > 1);
        /*
@@ -3851,7 +3857,7 @@ xfs_btree_delrec(
         * nothing left to do.
         */
        if (level == cur->bc_nlevels - 1) {
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
                        xfs_iroot_realloc(cur->bc_ino.ip, -1,
                                          cur->bc_ino.whichfork);
@@ -3919,7 +3925,7 @@ xfs_btree_delrec(
        xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
        xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
-       if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
                /*
                 * One child of root, need to get a chance to copy its contents
                 * into the root and delete it. Can't go up to next level,
@@ -4236,8 +4242,8 @@ xfs_btree_delrec(
         * If we joined with the right neighbor and there's a level above
         * us, increment the cursor at that level.
         */
-       else if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) ||
-                (level + 1 < cur->bc_nlevels)) {
+       else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE ||
+                level + 1 < cur->bc_nlevels) {
                error = xfs_btree_increment(cur, level + 1, &i);
                if (error)
                        goto error0;
@@ -4528,7 +4534,7 @@ xfs_btree_block_change_owner(
         * though, so everything is consistent in memory.
         */
        if (!bp) {
-               ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE);
+               ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
                ASSERT(level == cur->bc_nlevels - 1);
                return 0;
        }
......
@@ -117,7 +117,15 @@ static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
 #define XFS_BTREE_LONG_PTR_LEN         (sizeof(__be64))
 #define XFS_BTREE_SHORT_PTR_LEN        (sizeof(__be32))
+enum xfs_btree_type {
+       XFS_BTREE_TYPE_AG,
+       XFS_BTREE_TYPE_INODE,
+};
 struct xfs_btree_ops {
+       /* Type of btree - AG-rooted or inode-rooted */
+       enum xfs_btree_type     type;
        /* XFS_BTGEO_* flags that determine the geometry of the btree */
        unsigned int            geom_flags;
@@ -216,9 +224,8 @@ struct xfs_btree_ops {
 };
 /* btree geometry flags */
-#define XFS_BTGEO_ROOT_IN_INODE        (1U << 0) /* root may be variable size */
-#define XFS_BTGEO_LASTREC_UPDATE       (1U << 1) /* track last rec externally */
-#define XFS_BTGEO_OVERLAPPING          (1U << 2) /* overlapping intervals */
+#define XFS_BTGEO_LASTREC_UPDATE       (1U << 0) /* track last rec externally */
+#define XFS_BTGEO_OVERLAPPING          (1U << 1) /* overlapping intervals */
 /*
  * Reasons for the update_lastrec method to be called.
@@ -292,7 +299,7 @@ struct xfs_btree_cur
        /*
         * Short btree pointers need an agno to be able to turn the pointers
         * into physical addresses for IO, so the btree cursor switches between
-        * bc_ino and bc_ag based on whether XFS_BTGEO_ROOT_IN_INODE is set for
-        * the cursor.
+        * bc_ino and bc_ag based on bc_ops->type.
         */
        union {
......
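As a concrete illustration of the bc_ag/bc_ino comment above, here is a cut-down userspace model of a cursor that picks its union arm from the ops type; it mirrors the logic of xfs_btree_owner() in the hunks above, but every fake_-prefixed name is invented for the sketch:

#include <stdint.h>

enum xfs_btree_type { XFS_BTREE_TYPE_AG, XFS_BTREE_TYPE_INODE };

struct fake_btree_ops {
        enum xfs_btree_type type;
};

struct fake_btree_cur {
        const struct fake_btree_ops *bc_ops;
        union {                                         /* selected by bc_ops->type */
                struct { uint32_t pag_agno; } bc_ag;    /* AG-rooted btrees */
                struct { uint64_t i_ino; } bc_ino;      /* inode-rooted btrees */
        };
};

/* Same decision xfs_btree_owner() makes: who owns the blocks of this btree? */
static uint64_t fake_btree_owner(const struct fake_btree_cur *cur)
{
        if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                return cur->bc_ino.i_ino;
        return cur->bc_ag.pag_agno;
}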
@@ -136,7 +136,7 @@ xfs_btree_stage_afakeroot(
        struct xfs_btree_ops    *nops;
        ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
-       ASSERT(!(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE));
+       ASSERT(cur->bc_ops->type != XFS_BTREE_TYPE_INODE);
        ASSERT(cur->bc_tp == NULL);
        nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
@@ -217,7 +217,7 @@ xfs_btree_stage_ifakeroot(
        struct xfs_btree_ops    *nops;
        ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
-       ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE);
+       ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
        ASSERT(cur->bc_tp == NULL);
        nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
@@ -397,7 +397,7 @@ xfs_btree_bload_prep_block(
        struct xfs_btree_block  *new_block;
        int                     ret;
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
            level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp = xfs_btree_ifork_ptr(cur);
                size_t                  new_size;
@@ -702,7 +702,7 @@ xfs_btree_bload_compute_geometry(
                xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
                                &avg_per_block, &level_blocks, &dontcare64);
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
                        /*
                         * If all the items we want to store at this level
                         * would fit in the inode root block, then we have our
@@ -761,7 +761,7 @@ xfs_btree_bload_compute_geometry(
                return -EOVERFLOW;
        bbl->btree_height = cur->bc_nlevels;
-       if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                bbl->nr_blocks = nr_blocks - 1;
        else
                bbl->nr_blocks = nr_blocks;
@@ -888,7 +888,7 @@ xfs_btree_bload(
        }
        /* Initialize the new root. */
-       if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
                ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
                cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
                cur->bc_ino.ifake->if_blocks = total_blocks - 1;
......
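The practical effect of the type check in the bulk-load hunks above is whether the root block is counted: an inode-rooted tree keeps its root in the inode fork, so one fewer on-disk block is allocated. A toy, userspace-only model of that bookkeeping (the function name and the numbers are made up):

#include <stdio.h>

enum xfs_btree_type { XFS_BTREE_TYPE_AG, XFS_BTREE_TYPE_INODE };

/* Models the nr_blocks decision in xfs_btree_bload_compute_geometry(). */
static unsigned long blocks_to_allocate(enum xfs_btree_type type,
                                        unsigned long nr_blocks_total)
{
        if (type == XFS_BTREE_TYPE_INODE)
                return nr_blocks_total - 1;     /* root lives in the inode fork */
        return nr_blocks_total;
}

int main(void)
{
        printf("AG-rooted:    %lu blocks\n",
               blocks_to_allocate(XFS_BTREE_TYPE_AG, 10));
        printf("inode-rooted: %lu blocks\n",
               blocks_to_allocate(XFS_BTREE_TYPE_INODE, 10));
        return 0;
}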
@@ -76,8 +76,7 @@ struct xfs_btree_bload {
        /*
         * This function should return the size of the in-core btree root
-        * block.  It is only necessary for XFS_BTGEO_ROOT_IN_INODE btree
-        * types.
+        * block.  It is only necessary for XFS_BTREE_TYPE_INODE btrees.
         */
        xfs_btree_bload_iroot_size_fn   iroot_size;
......
@@ -399,6 +399,8 @@ xfs_inobt_keys_contiguous(
 }
 const struct xfs_btree_ops xfs_inobt_ops = {
+       .type = XFS_BTREE_TYPE_AG,
        .rec_len = sizeof(xfs_inobt_rec_t),
        .key_len = sizeof(xfs_inobt_key_t),
        .ptr_len = XFS_BTREE_SHORT_PTR_LEN,
@@ -425,6 +427,8 @@ const struct xfs_btree_ops xfs_inobt_ops = {
 };
 const struct xfs_btree_ops xfs_finobt_ops = {
+       .type = XFS_BTREE_TYPE_AG,
        .rec_len = sizeof(xfs_inobt_rec_t),
        .key_len = sizeof(xfs_inobt_key_t),
        .ptr_len = XFS_BTREE_SHORT_PTR_LEN,
......
@@ -318,6 +318,8 @@ xfs_refcountbt_keys_contiguous(
 }
 const struct xfs_btree_ops xfs_refcountbt_ops = {
+       .type = XFS_BTREE_TYPE_AG,
        .rec_len = sizeof(struct xfs_refcount_rec),
        .key_len = sizeof(struct xfs_refcount_key),
        .ptr_len = XFS_BTREE_SHORT_PTR_LEN,
......
@@ -473,6 +473,7 @@ xfs_rmapbt_keys_contiguous(
 }
 const struct xfs_btree_ops xfs_rmapbt_ops = {
+       .type = XFS_BTREE_TYPE_AG,
        .geom_flags = XFS_BTGEO_OVERLAPPING,
        .rec_len = sizeof(struct xfs_rmap_rec),
......
@@ -47,7 +47,7 @@ __xchk_btree_process_error(
                *error = 0;
                fallthrough;
        default:
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                        trace_xchk_ifork_btree_op_error(sc, cur, level,
                                        *error, ret_ip);
                else
@@ -91,7 +91,7 @@ __xchk_btree_set_corrupt(
 {
        sc->sm->sm_flags |= errflag;
-       if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                trace_xchk_ifork_btree_error(sc, cur, level,
                                ret_ip);
        else
@@ -239,7 +239,7 @@ xchk_btree_ptr_ok(
        bool                    res;
        /* A btree rooted in an inode has no block pointer to the root. */
-       if ((bs->cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
+       if (bs->cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
            level == bs->cur->bc_nlevels)
                return true;
@@ -390,7 +390,7 @@ xchk_btree_check_block_owner(
         * sc->sa so that we can check for the presence of an ownership record
         * in the rmap btree for the AG containing the block.
         */
-       init_sa = bs->cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE;
+       init_sa = bs->cur->bc_ops->type != XFS_BTREE_TYPE_AG;
        if (init_sa) {
                error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa);
                if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
@@ -434,7 +434,7 @@ xchk_btree_check_owner(
         * up.
         */
        if (bp == NULL) {
-               if (!(cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE))
+               if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, level);
                return 0;
        }
@@ -513,7 +513,7 @@ xchk_btree_check_minrecs(
         * child block might be less than the standard minrecs, but that's ok
         * provided that there's only one direct child of the root.
         */
-       if ((cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) &&
+       if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
            level == cur->bc_nlevels - 2) {
                struct xfs_btree_block  *root_block;
                struct xfs_buf          *root_bp;
......
@@ -37,7 +37,7 @@ xchk_btree_cur_fsbno(
                                xfs_buf_daddr(cur->bc_levels[level].bp));
        if (level == cur->bc_nlevels - 1 &&
-           (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE))
+           cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
        return NULLFSBLOCK;
......
@@ -2510,7 +2510,7 @@ TRACE_EVENT(xfs_btree_alloc_block,
        ),
        TP_fast_assign(
                __entry->dev = cur->bc_mp->m_super->s_dev;
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE) {
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
                        __entry->agno = 0;
                        __entry->ino = cur->bc_ino.ip->i_ino;
                } else {
@@ -2557,7 +2557,7 @@ TRACE_EVENT(xfs_btree_free_block,
                __entry->dev = cur->bc_mp->m_super->s_dev;
                __entry->agno = xfs_daddr_to_agno(cur->bc_mp,
                                        xfs_buf_daddr(bp));
-               if (cur->bc_ops->geom_flags & XFS_BTGEO_ROOT_IN_INODE)
+               if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
                        __entry->ino = cur->bc_ino.ip->i_ino;
                else
                        __entry->ino = 0;
......