Commit fd43925c authored by Chandan Babu R

Merge tag 'repair-rmap-btree-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.9-mergeC

xfs: online repair of rmap btrees

We have now constructed the four tools that we need to scan the
filesystem looking for reverse mappings: an inode scanner, hooks to
receive live updates from other writer threads, the ability to construct
btrees in memory, and a btree bulk loader.

This series glues those four pieces together, enabling us to scan the
filesystem for mappings, keep the new record set up to date while other
writers run, and then commit the new btree to disk atomically.

To reduce the size of each patch, the functionality is left disabled
until the end of the series and broken up into three patches: one to
create the mechanics of scanning the filesystem, a second to transition
to in-memory btrees, and a third to set up the live hooks.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'repair-rmap-btree-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: hook live rmap operations during a repair operation
  xfs: create a shadow rmap btree during rmap repair
  xfs: repair the rmapbt
  xfs: create agblock bitmap helper to count the number of set regions
  xfs: create a helper to decide if a file mapping targets the rt volume
parents 8394a97c 7e1b84b2
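As a rough sketch of how the hook API introduced below is meant to be used, a scanner might subscribe to live rmap updates for one AG like this. The xdemo_* functions are hypothetical placeholders, not code from this series; only the xfs_rmap_hook_* entry points come from the diff:

/* Hypothetical notifier: receives one call per committed rmap update. */
static int
xdemo_rmapbt_update(
	struct notifier_block	*nb,
	unsigned long		action,	/* enum xfs_rmap_intent_type */
	void			*data)	/* struct xfs_rmap_update_params */
{
	/* ...record the update in a shadow btree here... */
	return NOTIFY_DONE;
}

/* Hypothetical scan, showing only the hook lifecycle. */
static int
xdemo_scan_ag(
	struct xfs_perag	*pag)
{
	struct xfs_rmap_hook	hook;
	int			error;

	xfs_rmap_hook_enable();
	xfs_rmap_hook_setup(&hook, xdemo_rmapbt_update);
	error = xfs_rmap_hook_add(pag, &hook);
	if (error)
		goto out_disable;

	/* ...scan the AG while updates stream in through the hook... */

	xfs_rmap_hook_del(pag, &hook);
out_disable:
	xfs_rmap_hook_disable();
	return error;
}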
@@ -201,6 +201,7 @@ xfs-y += $(addprefix scrub/, \
reap.o \
refcount_repair.o \
repair.o \
rmap_repair.o \
)
xfs-$(CONFIG_XFS_RT) += $(addprefix scrub/, \
@@ -417,6 +417,7 @@ xfs_initialize_perag(
init_waitqueue_head(&pag->pag_active_wq);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
xfs_hooks_init(&pag->pag_rmap_update_hooks);
#endif /* __KERNEL__ */
error = xfs_buf_cache_init(&pag->pag_bcache);
@@ -90,6 +90,7 @@ struct xfs_perag {
uint8_t pagf_repair_bno_level;
uint8_t pagf_repair_cnt_level;
uint8_t pagf_repair_refcount_level;
uint8_t pagf_repair_rmap_level;
#endif
spinlock_t pag_state_lock;
@@ -119,6 +120,9 @@ struct xfs_perag {
* inconsistencies.
*/
struct xfs_defer_drain pag_intents_drain;
/* Hook to feed rmapbt updates to an active online repair. */
struct xfs_hooks pag_rmap_update_hooks;
#endif /* __KERNEL__ */
};
@@ -4913,7 +4913,7 @@ xfs_bmap_del_extent_delay(
XFS_STATS_INC(mp, xs_del_exlist);
isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
isrt = xfs_ifork_is_realtime(ip, whichfork);
del_endoff = del->br_startoff + del->br_blockcount;
got_endoff = got->br_startoff + got->br_blockcount;
da_old = startblockval(got->br_startblock);
@@ -5149,7 +5149,7 @@ xfs_bmap_del_extent_real(
return -ENOSPC;
*logflagsp = XFS_ILOG_CORE;
if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
if (xfs_ifork_is_realtime(ip, whichfork)) {
if (!(bflags & XFS_BMAPI_REMAP)) {
error = xfs_rtfree_blocks(tp, del->br_startblock,
del->br_blockcount);
@@ -5396,7 +5396,7 @@ __xfs_bunmapi(
return 0;
}
XFS_STATS_INC(mp, xs_blk_unmap);
isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
isrt = xfs_ifork_is_realtime(ip, whichfork);
end = start + len;
if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
@@ -6379,3 +6379,46 @@ xfs_bunmapi_range(
out:
return error;
}
struct xfs_bmap_query_range {
xfs_bmap_query_range_fn fn;
void *priv;
};
/* Format btree record and pass to our callback. */
STATIC int
xfs_bmap_query_range_helper(
struct xfs_btree_cur *cur,
const union xfs_btree_rec *rec,
void *priv)
{
struct xfs_bmap_query_range *query = priv;
struct xfs_bmbt_irec irec;
xfs_failaddr_t fa;
xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
&irec);
if (fa) {
xfs_btree_mark_sick(cur);
return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
cur->bc_ino.whichfork, fa, &irec);
}
return query->fn(cur, &irec, query->priv);
}
/* Find all bmaps. */
int
xfs_bmap_query_all(
struct xfs_btree_cur *cur,
xfs_bmap_query_range_fn fn,
void *priv)
{
struct xfs_bmap_query_range query = {
.priv = priv,
.fn = fn,
};
return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
}
@@ -280,4 +280,12 @@ extern struct kmem_cache *xfs_bmap_intent_cache;
int __init xfs_bmap_intent_init_cache(void);
void xfs_bmap_intent_destroy_cache(void);
typedef int (*xfs_bmap_query_range_fn)(
struct xfs_btree_cur *cur,
struct xfs_bmbt_irec *rec,
void *priv);
int xfs_bmap_query_all(struct xfs_btree_cur *cur, xfs_bmap_query_range_fn fn,
void *priv);
#endif /* __XFS_BMAP_H__ */
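For context, here is a minimal sketch of a caller of the new xfs_bmap_query_all; the counting callback and wrapper are hypothetical, while xfs_bmbt_init_cursor and xfs_btree_del_cursor are existing XFS interfaces:

/* Hypothetical callback: count the records in a bmap btree. */
static int
xdemo_count_bmaps(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*rec,
	void			*priv)
{
	unsigned long long	*nr = priv;

	(*nr)++;
	return 0;
}

/* Walk every mapping in the data fork of an inode in btree format. */
static int
xdemo_count_data_mappings(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned long long	*nr)
{
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, XFS_DATA_FORK);
	error = xfs_bmap_query_all(cur, xdemo_count_bmaps, nr);
	xfs_btree_del_cursor(cur, error);
	return error;
}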
@@ -813,3 +813,12 @@ xfs_iext_count_upgrade(
return 0;
}
/* Decide if a file mapping is on the realtime device or not. */
bool
xfs_ifork_is_realtime(
struct xfs_inode *ip,
int whichfork)
{
return XFS_IS_REALTIME_INODE(ip) && whichfork != XFS_ATTR_FORK;
}
@@ -260,6 +260,7 @@ int xfs_iext_count_may_overflow(struct xfs_inode *ip, int whichfork,
int nr_to_add);
int xfs_iext_count_upgrade(struct xfs_trans *tp, struct xfs_inode *ip,
uint nr_to_add);
bool xfs_ifork_is_realtime(struct xfs_inode *ip, int whichfork);
/* returns true if the fork has extents but they are not read in yet. */
static inline bool xfs_need_iread_extents(const struct xfs_ifork *ifp)
@@ -215,10 +215,10 @@ xfs_rmap_btrec_to_irec(
/* Simple checks for rmap records. */
xfs_failaddr_t
xfs_rmap_check_irec(
struct xfs_btree_cur *cur,
struct xfs_perag *pag,
const struct xfs_rmap_irec *irec)
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_mount *mp = pag->pag_mount;
bool is_inode;
bool is_unwritten;
bool is_bmbt;
@@ -233,8 +233,8 @@ xfs_rmap_check_irec(
return __this_address;
} else {
/* check for valid extent range, including overflow */
if (!xfs_verify_agbext(cur->bc_ag.pag, irec->rm_startblock,
irec->rm_blockcount))
if (!xfs_verify_agbext(pag, irec->rm_startblock,
irec->rm_blockcount))
return __this_address;
}
@@ -269,6 +269,16 @@ xfs_rmap_check_irec(
return NULL;
}
static inline xfs_failaddr_t
xfs_rmap_check_btrec(
struct xfs_btree_cur *cur,
const struct xfs_rmap_irec *irec)
{
if (xfs_btree_is_mem_rmap(cur->bc_ops))
return xfs_rmap_check_irec(cur->bc_mem.pag, irec);
return xfs_rmap_check_irec(cur->bc_ag.pag, irec);
}
static inline int
xfs_rmap_complain_bad_rec(
struct xfs_btree_cur *cur,
@@ -277,9 +287,13 @@ xfs_rmap_complain_bad_rec(
{
struct xfs_mount *mp = cur->bc_mp;
xfs_warn(mp,
"Reverse Mapping BTree record corruption in AG %d detected at %pS!",
cur->bc_ag.pag->pag_agno, fa);
if (xfs_btree_is_mem_rmap(cur->bc_ops))
xfs_warn(mp,
"In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa);
else
xfs_warn(mp,
"Reverse Mapping BTree record corruption in AG %d detected at %pS!",
cur->bc_ag.pag->pag_agno, fa);
xfs_warn(mp,
"Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
irec->rm_owner, irec->rm_flags, irec->rm_startblock,
@@ -307,7 +321,7 @@ xfs_rmap_get_rec(
fa = xfs_rmap_btrec_to_irec(rec, irec);
if (!fa)
fa = xfs_rmap_check_irec(cur, irec);
fa = xfs_rmap_check_btrec(cur, irec);
if (fa)
return xfs_rmap_complain_bad_rec(cur, fa, irec);
@@ -807,6 +821,86 @@ xfs_rmap_unmap(
return error;
}
#ifdef CONFIG_XFS_LIVE_HOOKS
/*
* Use a static key here to reduce the overhead of rmapbt live updates. If
* the compiler supports jump labels, the static branch will be replaced by a
* nop sled when there are no hook users. Online fsck is currently the only
* caller, so this is a reasonable tradeoff.
*
* Note: Patching the kernel code requires taking the cpu hotplug lock. Other
* parts of the kernel allocate memory with that lock held, which means that
* XFS callers cannot hold any locks that might be used by memory reclaim or
* writeback when calling the static_branch_{inc,dec} functions.
*/
DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_rmap_hooks_switch);
void
xfs_rmap_hook_disable(void)
{
xfs_hooks_switch_off(&xfs_rmap_hooks_switch);
}
void
xfs_rmap_hook_enable(void)
{
xfs_hooks_switch_on(&xfs_rmap_hooks_switch);
}
/* Call downstream hooks for a reverse mapping update. */
static inline void
xfs_rmap_update_hook(
struct xfs_trans *tp,
struct xfs_perag *pag,
enum xfs_rmap_intent_type op,
xfs_agblock_t startblock,
xfs_extlen_t blockcount,
bool unwritten,
const struct xfs_owner_info *oinfo)
{
if (xfs_hooks_switched_on(&xfs_rmap_hooks_switch)) {
struct xfs_rmap_update_params p = {
.startblock = startblock,
.blockcount = blockcount,
.unwritten = unwritten,
.oinfo = *oinfo, /* struct copy */
};
if (pag)
xfs_hooks_call(&pag->pag_rmap_update_hooks, op, &p);
}
}
/* Call the specified function during a reverse mapping update. */
int
xfs_rmap_hook_add(
struct xfs_perag *pag,
struct xfs_rmap_hook *hook)
{
return xfs_hooks_add(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
}
/* Stop calling the specified function during a reverse mapping update. */
void
xfs_rmap_hook_del(
struct xfs_perag *pag,
struct xfs_rmap_hook *hook)
{
xfs_hooks_del(&pag->pag_rmap_update_hooks, &hook->rmap_hook);
}
/* Configure rmap update hook functions. */
void
xfs_rmap_hook_setup(
struct xfs_rmap_hook *hook,
notifier_fn_t mod_fn)
{
xfs_hook_setup(&hook->rmap_hook, mod_fn);
}
#else
# define xfs_rmap_update_hook(t, p, o, s, b, u, oi) do { } while (0)
#endif /* CONFIG_XFS_LIVE_HOOKS */
/*
* Remove a reference to an extent in the rmap btree.
*/
@@ -827,7 +921,7 @@ xfs_rmap_free(
return 0;
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
xfs_rmap_update_hook(tp, pag, XFS_RMAP_UNMAP, bno, len, false, oinfo);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
xfs_btree_del_cursor(cur, error);
@@ -1079,6 +1173,7 @@ xfs_rmap_alloc(
return 0;
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
xfs_rmap_update_hook(tp, pag, XFS_RMAP_MAP, bno, len, false, oinfo);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
xfs_btree_del_cursor(cur, error);
@@ -2404,15 +2499,12 @@ xfs_rmap_map_raw(
{
struct xfs_owner_info oinfo;
oinfo.oi_owner = rmap->rm_owner;
oinfo.oi_offset = rmap->rm_offset;
oinfo.oi_flags = 0;
if (rmap->rm_flags & XFS_RMAP_ATTR_FORK)
oinfo.oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
if (rmap->rm_flags & XFS_RMAP_BMBT_BLOCK)
oinfo.oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK;
xfs_owner_info_pack(&oinfo, rmap->rm_owner, rmap->rm_offset,
rmap->rm_flags);
if (rmap->rm_flags || XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
if ((rmap->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
XFS_RMAP_UNWRITTEN)) ||
XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
return xfs_rmap_map(cur, rmap->rm_startblock,
rmap->rm_blockcount,
rmap->rm_flags & XFS_RMAP_UNWRITTEN,
@@ -2442,7 +2534,7 @@ xfs_rmap_query_range_helper(
fa = xfs_rmap_btrec_to_irec(rec, &irec);
if (!fa)
fa = xfs_rmap_check_irec(cur, &irec);
fa = xfs_rmap_check_btrec(cur, &irec);
if (fa)
return xfs_rmap_complain_bad_rec(cur, fa, &irec);
@@ -2497,6 +2589,38 @@ xfs_rmap_finish_one_cleanup(
xfs_trans_brelse(tp, agbp);
}
/* Commit an rmap operation into the ondisk tree. */
int
__xfs_rmap_finish_intent(
struct xfs_btree_cur *rcur,
enum xfs_rmap_intent_type op,
xfs_agblock_t bno,
xfs_extlen_t len,
const struct xfs_owner_info *oinfo,
bool unwritten)
{
switch (op) {
case XFS_RMAP_ALLOC:
case XFS_RMAP_MAP:
return xfs_rmap_map(rcur, bno, len, unwritten, oinfo);
case XFS_RMAP_MAP_SHARED:
return xfs_rmap_map_shared(rcur, bno, len, unwritten, oinfo);
case XFS_RMAP_FREE:
case XFS_RMAP_UNMAP:
return xfs_rmap_unmap(rcur, bno, len, unwritten, oinfo);
case XFS_RMAP_UNMAP_SHARED:
return xfs_rmap_unmap_shared(rcur, bno, len, unwritten, oinfo);
case XFS_RMAP_CONVERT:
return xfs_rmap_convert(rcur, bno, len, !unwritten, oinfo);
case XFS_RMAP_CONVERT_SHARED:
return xfs_rmap_convert_shared(rcur, bno, len, !unwritten,
oinfo);
default:
ASSERT(0);
return -EFSCORRUPTED;
}
}
/*
* Process one of the deferred rmap operations. We pass back the
* btree cursor to maintain our lock on the rmapbt between calls.
@@ -2563,39 +2687,14 @@ xfs_rmap_finish_one(
unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
switch (ri->ri_type) {
case XFS_RMAP_ALLOC:
case XFS_RMAP_MAP:
error = xfs_rmap_map(rcur, bno, ri->ri_bmap.br_blockcount,
unwritten, &oinfo);
break;
case XFS_RMAP_MAP_SHARED:
error = xfs_rmap_map_shared(rcur, bno,
ri->ri_bmap.br_blockcount, unwritten, &oinfo);
break;
case XFS_RMAP_FREE:
case XFS_RMAP_UNMAP:
error = xfs_rmap_unmap(rcur, bno, ri->ri_bmap.br_blockcount,
unwritten, &oinfo);
break;
case XFS_RMAP_UNMAP_SHARED:
error = xfs_rmap_unmap_shared(rcur, bno,
ri->ri_bmap.br_blockcount, unwritten, &oinfo);
break;
case XFS_RMAP_CONVERT:
error = xfs_rmap_convert(rcur, bno, ri->ri_bmap.br_blockcount,
!unwritten, &oinfo);
break;
case XFS_RMAP_CONVERT_SHARED:
error = xfs_rmap_convert_shared(rcur, bno,
ri->ri_bmap.br_blockcount, !unwritten, &oinfo);
break;
default:
ASSERT(0);
error = -EFSCORRUPTED;
}
error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
ri->ri_bmap.br_blockcount, &oinfo, unwritten);
if (error)
return error;
return error;
xfs_rmap_update_hook(tp, ri->ri_pag, ri->ri_type, bno,
ri->ri_bmap.br_blockcount, unwritten, &oinfo);
return 0;
}
/*
@@ -186,6 +186,10 @@ void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
int xfs_rmap_finish_one(struct xfs_trans *tp, struct xfs_rmap_intent *ri,
struct xfs_btree_cur **pcur);
int __xfs_rmap_finish_intent(struct xfs_btree_cur *rcur,
enum xfs_rmap_intent_type op, xfs_agblock_t bno,
xfs_extlen_t len, const struct xfs_owner_info *oinfo,
bool unwritten);
int xfs_rmap_lookup_le_range(struct xfs_btree_cur *cur, xfs_agblock_t bno,
uint64_t owner, uint64_t offset, unsigned int flags,
@@ -195,7 +199,7 @@ int xfs_rmap_compare(const struct xfs_rmap_irec *a,
union xfs_btree_rec;
xfs_failaddr_t xfs_rmap_btrec_to_irec(const union xfs_btree_rec *rec,
struct xfs_rmap_irec *irec);
xfs_failaddr_t xfs_rmap_check_irec(struct xfs_btree_cur *cur,
xfs_failaddr_t xfs_rmap_check_irec(struct xfs_perag *pag,
const struct xfs_rmap_irec *irec);
int xfs_rmap_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
@@ -235,4 +239,29 @@ extern struct kmem_cache *xfs_rmap_intent_cache;
int __init xfs_rmap_intent_init_cache(void);
void xfs_rmap_intent_destroy_cache(void);
/*
* Parameters for tracking reverse mapping changes. The hook function arg
* parameter is enum xfs_rmap_intent_type, and the rest is below.
*/
struct xfs_rmap_update_params {
xfs_agblock_t startblock;
xfs_extlen_t blockcount;
struct xfs_owner_info oinfo;
bool unwritten;
};
#ifdef CONFIG_XFS_LIVE_HOOKS
struct xfs_rmap_hook {
struct xfs_hook rmap_hook;
};
void xfs_rmap_hook_disable(void);
void xfs_rmap_hook_enable(void);
int xfs_rmap_hook_add(struct xfs_perag *pag, struct xfs_rmap_hook *hook);
void xfs_rmap_hook_del(struct xfs_perag *pag, struct xfs_rmap_hook *hook);
void xfs_rmap_hook_setup(struct xfs_rmap_hook *hook, notifier_fn_t mod_fn);
#endif
#endif /* __XFS_RMAP_H__ */
@@ -22,6 +22,8 @@
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
static struct kmem_cache *xfs_rmapbt_cur_cache;
@@ -342,7 +344,18 @@ xfs_rmapbt_verify(
level = be16_to_cpu(block->bb_level);
if (pag && xfs_perag_initialised_agf(pag)) {
if (level >= pag->pagf_rmap_level)
unsigned int maxlevel = pag->pagf_rmap_level;
#ifdef CONFIG_XFS_ONLINE_REPAIR
/*
* Online repair could be rewriting the free space btrees, so
* we'll validate against the larger of either tree while this
* is going on.
*/
maxlevel = max_t(unsigned int, maxlevel,
pag->pagf_repair_rmap_level);
#endif
if (level >= maxlevel)
return __this_address;
} else if (level >= mp->m_rmap_maxlevels)
return __this_address;
@@ -530,6 +543,151 @@ xfs_rmapbt_init_cursor(
return cur;
}
#ifdef CONFIG_XFS_BTREE_IN_MEM
static inline unsigned int
xfs_rmapbt_mem_block_maxrecs(
unsigned int blocklen,
bool leaf)
{
if (leaf)
return blocklen / sizeof(struct xfs_rmap_rec);
return blocklen /
(2 * sizeof(struct xfs_rmap_key) + sizeof(__be64));
}
/*
* Validate an in-memory rmap btree block. Callers are allowed to generate an
* in-memory btree even if the ondisk feature is not enabled.
*/
static xfs_failaddr_t
xfs_rmapbt_mem_verify(
struct xfs_buf *bp)
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
xfs_failaddr_t fa;
unsigned int level;
unsigned int maxrecs;
if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
if (fa)
return fa;
level = be16_to_cpu(block->bb_level);
if (level >= xfs_rmapbt_maxlevels_ondisk())
return __this_address;
maxrecs = xfs_rmapbt_mem_block_maxrecs(
XFBNO_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN, level == 0);
return xfs_btree_memblock_verify(bp, maxrecs);
}
static void
xfs_rmapbt_mem_rw_verify(
struct xfs_buf *bp)
{
xfs_failaddr_t fa = xfs_rmapbt_mem_verify(bp);
if (fa)
xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
/* skip crc checks on in-memory btrees to save time */
static const struct xfs_buf_ops xfs_rmapbt_mem_buf_ops = {
.name = "xfs_rmapbt_mem",
.magic = { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
.verify_read = xfs_rmapbt_mem_rw_verify,
.verify_write = xfs_rmapbt_mem_rw_verify,
.verify_struct = xfs_rmapbt_mem_verify,
};
const struct xfs_btree_ops xfs_rmapbt_mem_ops = {
.name = "mem_rmap",
.type = XFS_BTREE_TYPE_MEM,
.geom_flags = XFS_BTGEO_OVERLAPPING,
.rec_len = sizeof(struct xfs_rmap_rec),
/* Overlapping btree; 2 keys per pointer. */
.key_len = 2 * sizeof(struct xfs_rmap_key),
.ptr_len = XFS_BTREE_LONG_PTR_LEN,
.lru_refs = XFS_RMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_rmap_mem_2),
.dup_cursor = xfbtree_dup_cursor,
.set_root = xfbtree_set_root,
.alloc_block = xfbtree_alloc_block,
.free_block = xfbtree_free_block,
.get_minrecs = xfbtree_get_minrecs,
.get_maxrecs = xfbtree_get_maxrecs,
.init_key_from_rec = xfs_rmapbt_init_key_from_rec,
.init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec,
.init_rec_from_cur = xfs_rmapbt_init_rec_from_cur,
.init_ptr_from_cur = xfbtree_init_ptr_from_cur,
.key_diff = xfs_rmapbt_key_diff,
.buf_ops = &xfs_rmapbt_mem_buf_ops,
.diff_two_keys = xfs_rmapbt_diff_two_keys,
.keys_inorder = xfs_rmapbt_keys_inorder,
.recs_inorder = xfs_rmapbt_recs_inorder,
.keys_contiguous = xfs_rmapbt_keys_contiguous,
};
/* Create a cursor for an in-memory btree. */
struct xfs_btree_cur *
xfs_rmapbt_mem_cursor(
struct xfs_perag *pag,
struct xfs_trans *tp,
struct xfbtree *xfbt)
{
struct xfs_btree_cur *cur;
struct xfs_mount *mp = pag->pag_mount;
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_mem_ops,
xfs_rmapbt_maxlevels_ondisk(), xfs_rmapbt_cur_cache);
cur->bc_mem.xfbtree = xfbt;
cur->bc_nlevels = xfbt->nlevels;
cur->bc_mem.pag = xfs_perag_hold(pag);
return cur;
}
/* Create an in-memory rmap btree. */
int
xfs_rmapbt_mem_init(
struct xfs_mount *mp,
struct xfbtree *xfbt,
struct xfs_buftarg *btp,
xfs_agnumber_t agno)
{
xfbt->owner = agno;
return xfbtree_init(mp, xfbt, btp, &xfs_rmapbt_mem_ops);
}
/* Compute the max possible height for reverse mapping btrees in memory. */
static unsigned int
xfs_rmapbt_mem_maxlevels(void)
{
unsigned int minrecs[2];
unsigned int blocklen;
blocklen = XFBNO_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
minrecs[0] = xfs_rmapbt_mem_block_maxrecs(blocklen, true) / 2;
minrecs[1] = xfs_rmapbt_mem_block_maxrecs(blocklen, false) / 2;
/*
* How tall can an in-memory rmap btree become if we filled the entire
* AG with rmap records?
*/
return xfs_btree_compute_maxlevels(minrecs,
XFS_MAX_AG_BYTES / sizeof(struct xfs_rmap_rec));
}
#else
# define xfs_rmapbt_mem_maxlevels() (0)
#endif /* CONFIG_XFS_BTREE_IN_MEM */
/*
* Install a new reverse mapping btree root. Caller is responsible for
* invalidating and freeing the old btree blocks.
@@ -600,7 +758,8 @@ xfs_rmapbt_maxlevels_ondisk(void)
* like if it consumes almost all the blocks in the AG due to maximal
* sharing factor.
*/
return xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS);
return max(xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS),
xfs_rmapbt_mem_maxlevels());
}
/* Compute the maximum height of an rmap btree. */
@@ -10,6 +10,7 @@ struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
struct xbtree_afakeroot;
struct xfbtree;
/* rmaps only exist on crc enabled filesystems */
#define XFS_RMAP_BLOCK_LEN XFS_BTREE_SBLOCK_CRC_LEN
@@ -62,4 +63,9 @@ unsigned int xfs_rmapbt_maxlevels_ondisk(void);
int __init xfs_rmapbt_init_cur_cache(void);
void xfs_rmapbt_destroy_cur_cache(void);
struct xfs_btree_cur *xfs_rmapbt_mem_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfbtree *xfbtree);
int xfs_rmapbt_mem_init(struct xfs_mount *mp, struct xfbtree *xfbtree,
struct xfs_buftarg *btp, xfs_agnumber_t agno);
#endif /* __XFS_RMAP_BTREE_H__ */
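Putting the in-memory pieces together, repair code might stage a shadow rmap btree roughly as follows. This is only a sketch under stated assumptions: xdemo_stage_shadow_rmapbt is hypothetical, it assumes sc->xmbtp was already created by xrep_setup_xfbtree during scrub setup, xfbtree_destroy comes from the earlier in-memory btree series, and real code would also have to commit or cancel the xfbtree's dirty buffers along with the transaction:

/* Hypothetical: build a shadow rmap btree and add one mapping to it. */
static int
xdemo_stage_shadow_rmapbt(
	struct xfs_scrub		*sc,
	struct xfs_perag		*pag,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfbtree			xfbt;
	struct xfs_btree_cur		*mcur;
	int				error;

	/* Assumes xrep_setup_xfbtree() already created sc->xmbtp. */
	error = xfs_rmapbt_mem_init(sc->mp, &xfbt, sc->xmbtp, pag->pag_agno);
	if (error)
		return error;

	/* Insert through the regular rmap code paths. */
	mcur = xfs_rmapbt_mem_cursor(pag, sc->tp, &xfbt);
	error = __xfs_rmap_finish_intent(mcur, XFS_RMAP_MAP, agbno, len,
			oinfo, false);
	xfs_btree_del_cursor(mcur, error);

	xfbtree_destroy(&xfbt);
	return error;
}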
@@ -51,6 +51,7 @@ extern const struct xfs_btree_ops xfs_finobt_ops;
extern const struct xfs_btree_ops xfs_bmbt_ops;
extern const struct xfs_btree_ops xfs_refcountbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_mem_ops;
static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops)
{
@@ -87,6 +88,15 @@ static inline bool xfs_btree_is_rmap(const struct xfs_btree_ops *ops)
return ops == &xfs_rmapbt_ops;
}
#ifdef CONFIG_XFS_BTREE_IN_MEM
static inline bool xfs_btree_is_mem_rmap(const struct xfs_btree_ops *ops)
{
return ops == &xfs_rmapbt_mem_ops;
}
#else
# define xfs_btree_is_mem_rmap(...) (false)
#endif
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
@@ -65,4 +65,9 @@ int xagb_bitmap_set_btblocks(struct xagb_bitmap *bitmap,
int xagb_bitmap_set_btcur_path(struct xagb_bitmap *bitmap,
struct xfs_btree_cur *cur);
static inline uint32_t xagb_bitmap_count_set_regions(struct xagb_bitmap *b)
{
return xbitmap32_count_set_regions(&b->agbitmap);
}
#endif /* __XFS_SCRUB_AGB_BITMAP_H__ */
@@ -566,3 +566,17 @@ xbitmap32_test(
*len = bn->bn_start - start;
return false;
}
/* Count the number of set regions in this bitmap. */
uint32_t
xbitmap32_count_set_regions(
struct xbitmap32 *bitmap)
{
struct xbitmap32_node *bn;
uint32_t nr = 0;
for_each_xbitmap32_extent(bn, bitmap)
nr++;
return nr;
}
@@ -62,4 +62,6 @@ int xbitmap32_walk(struct xbitmap32 *bitmap, xbitmap32_walk_fn fn,
bool xbitmap32_empty(struct xbitmap32 *bitmap);
bool xbitmap32_test(struct xbitmap32 *bitmap, uint32_t start, uint32_t *len);
uint32_t xbitmap32_count_set_regions(struct xbitmap32 *bitmap);
#endif /* __XFS_SCRUB_BITMAP_H__ */
@@ -924,7 +924,7 @@ xchk_bmap(
if (!ifp)
return -ENOENT;
info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
info.is_rt = xfs_ifork_is_realtime(ip, whichfork);
info.whichfork = whichfork;
info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
info.sc = sc;
@@ -460,7 +460,7 @@ xchk_perag_read_headers(
* Grab the AG headers for the attached perag structure and wait for pending
* intents to drain.
*/
static int
int
xchk_perag_drain_and_lock(
struct xfs_scrub *sc)
{
@@ -1309,6 +1309,9 @@ xchk_fsgates_enable(
if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
xfs_dir_hook_enable();
if (scrub_fsgates & XCHK_FSGATES_RMAP)
xfs_rmap_hook_enable();
sc->flags |= scrub_fsgates;
}
@@ -134,6 +134,7 @@ int xchk_setup_nlinks(struct xfs_scrub *sc);
void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
struct xchk_ag *sa);
int xchk_perag_drain_and_lock(struct xfs_scrub *sc);
/*
* Grab all AG resources, treating the inability to grab the perag structure as
@@ -239,7 +239,11 @@ xrep_newbt_alloc_ag_blocks(
xrep_newbt_validate_ag_alloc_hint(xnr);
error = xfs_alloc_vextent_near_bno(&args, xnr->alloc_hint);
if (xnr->alloc_vextent)
error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
else
error = xfs_alloc_vextent_near_bno(&args,
xnr->alloc_hint);
if (error)
return error;
if (args.fsbno == NULLFSBLOCK)
@@ -309,7 +313,11 @@ xrep_newbt_alloc_file_blocks(
xrep_newbt_validate_file_alloc_hint(xnr);
error = xfs_alloc_vextent_start_ag(&args, xnr->alloc_hint);
if (xnr->alloc_vextent)
error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
else
error = xfs_alloc_vextent_start_ag(&args,
xnr->alloc_hint);
if (error)
return error;
if (args.fsbno == NULLFSBLOCK)
@@ -6,6 +6,8 @@
#ifndef __XFS_SCRUB_NEWBT_H__
#define __XFS_SCRUB_NEWBT_H__
struct xfs_alloc_arg;
struct xrep_newbt_resv {
/* Link to list of extents that we've reserved. */
struct list_head list;
@@ -28,6 +30,11 @@ struct xrep_newbt_resv {
struct xrep_newbt {
struct xfs_scrub *sc;
/* Custom allocation function, or NULL for xfs_alloc_vextent */
int (*alloc_vextent)(struct xfs_scrub *sc,
struct xfs_alloc_arg *args,
xfs_fsblock_t alloc_hint);
/* List of extents that we've reserved. */
struct list_head resv_list;
@@ -114,7 +114,7 @@ xreap_put_freelist(
int error;
/* Make sure there's space on the freelist. */
error = xrep_fix_freelist(sc, true);
error = xrep_fix_freelist(sc, 0);
if (error)
return error;
@@ -31,12 +31,14 @@
#include "xfs_error.h"
#include "xfs_reflink.h"
#include "xfs_health.h"
#include "xfs_buf_mem.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/stats.h"
#include "scrub/xfile.h"
/*
* Attempt to repair some metadata, if the metadata is corrupt and userspace
@@ -401,7 +403,7 @@ xrep_calc_ag_resblks(
int
xrep_fix_freelist(
struct xfs_scrub *sc,
bool can_shrink)
int alloc_flags)
{
struct xfs_alloc_arg args = {0};
@@ -411,8 +413,7 @@
args.alignment = 1;
args.pag = sc->sa.pag;
return xfs_alloc_fix_freelist(&args,
can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
return xfs_alloc_fix_freelist(&args, alloc_flags);
}
/*
......@@ -1148,3 +1149,55 @@ xrep_metadata_inode_forks(
return 0;
}
/*
* Set up an in-memory buffer cache so that we can use the xfbtree. Allocating
* a shmem file might take locks, so we cannot be in transaction context. Park
* our resources in the scrub context and let the teardown function take care
* of them at the right time.
*/
int
xrep_setup_xfbtree(
struct xfs_scrub *sc,
const char *descr)
{
ASSERT(sc->tp == NULL);
return xmbuf_alloc(sc->mp, descr, &sc->xmbtp);
}
/*
* Create a dummy transaction for use in a live update hook function. This
* function MUST NOT be called from regular repair code because the current
* process' transaction is saved via the cookie.
*/
int
xrep_trans_alloc_hook_dummy(
struct xfs_mount *mp,
void **cookiep,
struct xfs_trans **tpp)
{
int error;
*cookiep = current->journal_info;
current->journal_info = NULL;
error = xfs_trans_alloc_empty(mp, tpp);
if (!error)
return 0;
current->journal_info = *cookiep;
*cookiep = NULL;
return error;
}
/* Cancel a dummy transaction used by a live update hook function. */
void
xrep_trans_cancel_hook_dummy(
void **cookiep,
struct xfs_trans *tp)
{
xfs_trans_cancel(tp);
current->journal_info = *cookiep;
*cookiep = NULL;
}
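To illustrate how the dummy-transaction helpers above are intended to be used, a live-update hook body might look like the following sketch; struct xdemo_rmap_repair and xdemo_rmap_hook_fn are hypothetical, and the actual shadow-btree update is elided:

/* Hypothetical repair context embedding the rmap hook. */
struct xdemo_rmap_repair {
	struct xfs_rmap_hook		rhook;
	struct xfs_mount		*mp;
};

/* Runs in the committing thread's context, without a transaction. */
static int
xdemo_rmap_hook_fn(
	struct notifier_block		*nb,
	unsigned long			action,
	void				*data)
{
	struct xfs_rmap_update_params	*p = data;
	struct xdemo_rmap_repair	*rr;
	struct xfs_trans		*tp;
	void				*txcookie;

	rr = container_of(nb, struct xdemo_rmap_repair,
			rhook.rmap_hook.nb);

	/* Park the caller's transaction and borrow an empty one. */
	if (xrep_trans_alloc_hook_dummy(rr->mp, &txcookie, &tp))
		return NOTIFY_DONE;

	/* ...apply @action and @p to the shadow btree under @tp... */

	xrep_trans_cancel_hook_dummy(&txcookie, tp);
	return NOTIFY_DONE;
}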
@@ -51,7 +51,7 @@ struct xbitmap;
struct xagb_bitmap;
struct xfsb_bitmap;
int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
int xrep_fix_freelist(struct xfs_scrub *sc, int alloc_flags);
struct xrep_find_ag_btree {
/* in: rmap owner of the btree we're looking for */
@@ -81,11 +81,14 @@ int xrep_ino_dqattach(struct xfs_scrub *sc);
# define xrep_ino_dqattach(sc) (0)
#endif /* CONFIG_XFS_QUOTA */
int xrep_setup_xfbtree(struct xfs_scrub *sc, const char *descr);
int xrep_ino_ensure_extent_count(struct xfs_scrub *sc, int whichfork,
xfs_extnum_t nextents);
int xrep_reset_perag_resv(struct xfs_scrub *sc);
int xrep_bmap(struct xfs_scrub *sc, int whichfork, bool allow_unwritten);
int xrep_metadata_inode_forks(struct xfs_scrub *sc);
int xrep_setup_ag_rmapbt(struct xfs_scrub *sc);
/* Repair setup functions */
int xrep_setup_ag_allocbt(struct xfs_scrub *sc);
@@ -111,6 +114,7 @@ int xrep_agfl(struct xfs_scrub *sc);
int xrep_agi(struct xfs_scrub *sc);
int xrep_allocbt(struct xfs_scrub *sc);
int xrep_iallocbt(struct xfs_scrub *sc);
int xrep_rmapbt(struct xfs_scrub *sc);
int xrep_refcountbt(struct xfs_scrub *sc);
int xrep_inode(struct xfs_scrub *sc);
int xrep_bmap_data(struct xfs_scrub *sc);
@@ -136,6 +140,10 @@ int xrep_quotacheck(struct xfs_scrub *sc);
int xrep_reinit_pagf(struct xfs_scrub *sc);
int xrep_reinit_pagi(struct xfs_scrub *sc);
int xrep_trans_alloc_hook_dummy(struct xfs_mount *mp, void **cookiep,
struct xfs_trans **tpp);
void xrep_trans_cancel_hook_dummy(void **cookiep, struct xfs_trans *tp);
#else
#define xrep_ino_dqattach(sc) (0)
@@ -177,6 +185,7 @@
return 0;
}
#define xrep_setup_ag_allocbt xrep_setup_nothing
#define xrep_setup_ag_rmapbt xrep_setup_nothing
#define xrep_setup_inode(sc, imap) ((void)0)
@@ -190,6 +199,7 @@
#define xrep_agi xrep_notsupported
#define xrep_allocbt xrep_notsupported
#define xrep_iallocbt xrep_notsupported
#define xrep_rmapbt xrep_notsupported
#define xrep_refcountbt xrep_notsupported
#define xrep_inode xrep_notsupported
#define xrep_bmap_data xrep_notsupported
@@ -25,6 +25,7 @@
#include "scrub/btree.h"
#include "scrub/bitmap.h"
#include "scrub/agb_bitmap.h"
#include "scrub/repair.h"
/*
* Set us up to scrub reverse mapping btrees.
@@ -36,6 +37,14 @@
if (xchk_need_intent_drain(sc))
xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
if (xchk_could_repair(sc)) {
int error;
error = xrep_setup_ag_rmapbt(sc);
if (error)
return error;
}
return xchk_setup_ag_btree(sc, false);
}
@@ -349,7 +358,7 @@ xchk_rmapbt_rec(
struct xfs_rmap_irec irec;
if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
xfs_rmap_check_irec(bs->cur, &irec) != NULL) {
xfs_rmap_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
return 0;
}
[A large diff is collapsed in this view and not shown; judging from the Makefile hunk above, it is presumably the new scrub/rmap_repair.c.]
@@ -16,6 +16,7 @@
#include "xfs_qm.h"
#include "xfs_scrub.h"
#include "xfs_buf_mem.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
......@@ -164,6 +165,9 @@ xchk_fsgates_disable(
if (sc->flags & XCHK_FSGATES_DIRENTS)
xfs_dir_hook_disable();
if (sc->flags & XCHK_FSGATES_RMAP)
xfs_rmap_hook_disable();
sc->flags &= ~XCHK_FSGATES_ALL;
}
......@@ -278,7 +282,7 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
.setup = xchk_setup_ag_rmapbt,
.scrub = xchk_rmapbt,
.has = xfs_has_rmapbt,
.repair = xrep_notsupported,
.repair = xrep_rmapbt,
},
[XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */
.type = ST_PERAG,
@@ -126,6 +126,7 @@ struct xfs_scrub {
#define XCHK_NEED_DRAIN (1U << 3) /* scrub needs to drain defer ops */
#define XCHK_FSGATES_QUOTA (1U << 4) /* quota live update enabled */
#define XCHK_FSGATES_DIRENTS (1U << 5) /* directory live update enabled */
#define XCHK_FSGATES_RMAP (1U << 6) /* rmapbt live update enabled */
#define XREP_RESET_PERAG_RESV (1U << 30) /* must reset AG space reservation */
#define XREP_ALREADY_FIXED (1U << 31) /* checking our repair work */
@@ -137,7 +138,8 @@
*/
#define XCHK_FSGATES_ALL (XCHK_FSGATES_DRAIN | \
XCHK_FSGATES_QUOTA | \
XCHK_FSGATES_DIRENTS)
XCHK_FSGATES_DIRENTS | \
XCHK_FSGATES_RMAP)
/* Metadata scrubbers */
int xchk_tester(struct xfs_scrub *sc);
@@ -18,6 +18,7 @@
#include "xfs_quota_defs.h"
#include "xfs_da_format.h"
#include "xfs_dir2.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
@@ -25,6 +25,7 @@ struct xchk_dqiter;
struct xchk_iscan;
struct xchk_nlink;
struct xchk_fscounters;
struct xfs_rmap_update_params;
/*
* ftrace's __print_symbolic requires that all enum values be wrapped in the
@@ -112,9 +113,19 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_HEALTHY);
{ XCHK_NEED_DRAIN, "need_drain" }, \
{ XCHK_FSGATES_QUOTA, "fsgates_quota" }, \
{ XCHK_FSGATES_DIRENTS, "fsgates_dirents" }, \
{ XCHK_FSGATES_RMAP, "fsgates_rmap" }, \
{ XREP_RESET_PERAG_RESV, "reset_perag_resv" }, \
{ XREP_ALREADY_FIXED, "already_fixed" }
TRACE_DEFINE_ENUM(XFS_RMAP_MAP);
TRACE_DEFINE_ENUM(XFS_RMAP_MAP_SHARED);
TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP);
TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP_SHARED);
TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT);
TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT_SHARED);
TRACE_DEFINE_ENUM(XFS_RMAP_ALLOC);
TRACE_DEFINE_ENUM(XFS_RMAP_FREE);
DECLARE_EVENT_CLASS(xchk_class,
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
int error),
@@ -1595,7 +1606,6 @@ DEFINE_EVENT(xrep_rmap_class, name, \
uint64_t owner, uint64_t offset, unsigned int flags), \
TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
DEFINE_REPAIR_RMAP_EVENT(xrep_ibt_walk_rmap);
DEFINE_REPAIR_RMAP_EVENT(xrep_rmap_extent_fn);
DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_walk_rmap);
TRACE_EVENT(xrep_abt_found,
@@ -1713,6 +1723,38 @@ TRACE_EVENT(xrep_bmap_found,
__entry->state)
);
TRACE_EVENT(xrep_rmap_found,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
const struct xfs_rmap_irec *rec),
TP_ARGS(mp, agno, rec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
__field(uint64_t, owner)
__field(uint64_t, offset)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agbno = rec->rm_startblock;
__entry->len = rec->rm_blockcount;
__entry->owner = rec->rm_owner;
__entry->offset = rec->rm_offset;
__entry->flags = rec->rm_flags;
),
TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
__entry->len,
__entry->owner,
__entry->offset,
__entry->flags)
);
TRACE_EVENT(xrep_findroot_block,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
uint32_t magic, uint16_t level),
@@ -2195,6 +2237,42 @@ DEFINE_XREP_DQUOT_EVENT(xrep_quotacheck_dquot);
DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_update_inode);
DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_unfixable_inode);
TRACE_EVENT(xrep_rmap_live_update,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int op,
const struct xfs_rmap_update_params *p),
TP_ARGS(mp, agno, op, p),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(unsigned int, op)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
__field(uint64_t, owner)
__field(uint64_t, offset)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->op = op;
__entry->agbno = p->startblock;
__entry->len = p->blockcount;
xfs_owner_info_unpack(&p->oinfo, &__entry->owner,
&__entry->offset, &__entry->flags);
if (p->unwritten)
__entry->flags |= XFS_RMAP_UNWRITTEN;
),
TP_printk("dev %d:%d agno 0x%x op %d agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->op,
__entry->agbno,
__entry->len,
__entry->owner,
__entry->offset,
__entry->flags)
);
#endif /* IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) */
#endif /* _TRACE_XFS_SCRUB_TRACE_H */
@@ -50,7 +50,8 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{ "ibt2", xfsstats_offset(xs_fibt_2) },
{ "fibt2", xfsstats_offset(xs_rmap_2) },
{ "rmapbt", xfsstats_offset(xs_refcbt_2) },
{ "refcntbt", xfsstats_offset(xs_qm_dqreclaims)},
{ "refcntbt", xfsstats_offset(xs_rmap_mem_2) },
{ "rmapbt_mem", xfsstats_offset(xs_qm_dqreclaims)},
/* we print both series of quota information together */
{ "qm", xfsstats_offset(xs_xstrat_bytes)},
};
@@ -125,6 +125,7 @@ struct __xfsstats {
uint32_t xs_fibt_2[__XBTS_MAX];
uint32_t xs_rmap_2[__XBTS_MAX];
uint32_t xs_refcbt_2[__XBTS_MAX];
uint32_t xs_rmap_mem_2[__XBTS_MAX];
uint32_t xs_qm_dqreclaims;
uint32_t xs_qm_dqreclaim_misses;
uint32_t xs_qm_dquot_dups;