Commit 781fca5b authored by Linus Torvalds

Merge tag 'xfs-4.19-merge-6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs updates from Darrick Wong:
 "This is the second part of the XFS changes for 4.19.

  The biggest changes are the removal of buffer heads from XFS, a massive
  reworking of the deferred transaction operations handling code, the
  removal of the long defunct barrier/nobarrier mount options, and the
  addition of a few more online repair functions.

  Summary:

   - Use extent maps to track pagecache page status instead of
     bufferhead state.

   - Refactor pagecache read and write paths to use the new iomap
     library functions, which enable us to drop the old bufferhead code
     for pagesize == blocksize filesystems.

   - Set up parallel per-block-per-page metadata to track subpage
     information that was tracked by buffer heads, which enables us to
     drop the old bufferhead code for pagesize > blocksize filesystems.

   - Tie a deferred ops control structure to a transaction so that we
     can take advantage of an upper-level dfops without having to plumb
     pointer passing through the code.

   - Refactor the deferred ops code to track deferred ops as part of the
     transaction structure (instead of as a separate data structure) so
     that we can simplify the scoping rules around defer_ops.

   - Refactor twisty delwri buffer submission code to avoid deadlocks.

   - Shorten long lines and fix indentation problems in the scrub code.

   - Detect obviously bad summary counts at mount and fix them.

   - Directly associate deferred ops control structure with a
     transaction so that callers no longer have to manage it themselves.

   - Remove a couple of IRIX-era inode macros.

   - Remove the long-deprecated 'barrier' and 'nobarrier' mount options.

   - Clean up the inode fork structure a bit.

   - Check for bad fs summary counter values in the superblock.

   - Reduce COW fork lookups during writeback.

   - Refactor the deferred ops control structures into the transaction
     structure, thereby eliminating the need for transaction users to
     handle the deferred ops as a separate data structure.

   - Add the ability to repair AG headers online.

   - Fix a crash due to insufficient return value checking.

   - Various fixes and cleanups"

* tag 'xfs-4.19-merge-6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (155 commits)
  xfs: fix a null pointer dereference in xfs_bmap_extents_to_btree
  xfs: remove b_last_holder & associated macros
  iomap: Switch to offset_in_page for clarity
  xfs: Close race between direct IO and xfs_break_layouts()
  xfs: repair the AGI
  xfs: repair the AGFL
  xfs: repair the AGF
  xfs: remove dead error handling code in xfs_dquot_disk_alloc()
  xfs: use WRITE_ONCE to update if_seq
  xfs: fix a comment in xfs_log_reserve
  xfs: only validate summary counts on primary superblock
  xfs: substitute spaces with tabs
  xfs: fold dfops into the transaction
  xfs: always defer agfl block frees
  xfs: pass transaction to xfs_defer_add()
  xfs: replace xfs_defer_ops ->dop_pending with on-stack list
  xfs: cancel dfops on xfs_defer_finish() error
  xfs: clean out superfluous dfops dop params/vars
  xfs: drop dop param from xfs_defer_op_type ->finish_item() callback
  xfs: automatic dfops inode relogging
  ...
parents 10f3e23f 01239d77
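
A quick orientation before the diffs: the deferred-ops items in the summary boil down to a calling-convention change that the hunks below apply tree-wide. The sketch below is assembled from those hunks; the surrounding caller is hypothetical, but the xfs_defer_*() and xfs_bmapi_write() signatures are the ones visible in the diff:

	/* before (4.18): callers owned and plumbed a standalone dfops */
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstblock;

	xfs_defer_init(&dfops, &firstblock);
	error = xfs_bmapi_write(tp, ip, bno, len, flags, &firstblock,
				total, &map, &nmap, &dfops);
	if (error)
		goto out_defer_cancel;		/* xfs_defer_cancel(&dfops) */
	xfs_defer_ijoin(&dfops, ip);
	error = xfs_defer_finish(&tp, &dfops);

	/* after (4.19): the pending work lives in the transaction itself */
	error = xfs_bmapi_write(tp, ip, bno, len, flags, total, &map, &nmap);
	if (error)
		goto out_defer_cancel;		/* xfs_defer_cancel(tp) */
	error = xfs_defer_finish(&tp);		/* cancels its own dfops on error */
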
......@@ -223,8 +223,6 @@ Deprecated Mount Options
Name                          Removal Schedule
----                          ----------------
barrier                       no earlier than v4.15
nobarrier                     no earlier than v4.15
Removed Mount Options
......@@ -236,6 +234,8 @@ Removed Mount Options
ihashsize                     v4.0
irixsgid                      v4.0
osyncisdsync/osyncisosync     v4.0
barrier                       v4.19
nobarrier                     v4.19
sysctls
......
......@@ -158,6 +158,7 @@ xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o
ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y)
xfs-y += $(addprefix scrub/, \
agheader_repair.o \
bitmap.o \
repair.o \
)
endif
......
......@@ -248,7 +248,8 @@ __xfs_ag_resv_init(
/* Create a per-AG block reservation. */
int
xfs_ag_resv_init(
struct xfs_perag *pag)
struct xfs_perag *pag,
struct xfs_trans *tp)
{
struct xfs_mount *mp = pag->pag_mount;
xfs_agnumber_t agno = pag->pag_agno;
......@@ -260,11 +261,11 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
......@@ -282,7 +283,7 @@ xfs_ag_resv_init(
mp->m_inotbt_nores = true;
error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
&used);
if (error)
goto out;
......@@ -298,7 +299,7 @@ xfs_ag_resv_init(
if (pag->pag_rmapbt_resv.ar_asked == 0) {
ask = used = 0;
error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
error = xfs_rmapbt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
......@@ -309,7 +310,7 @@ xfs_ag_resv_init(
#ifdef DEBUG
/* need to read in the AGF for the ASSERT below to work */
error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
error = xfs_alloc_pagf_init(pag->pag_mount, tp, pag->pag_agno, 0);
if (error)
return error;
......
......@@ -7,7 +7,7 @@
#define __XFS_AG_RESV_H__
int xfs_ag_resv_free(struct xfs_perag *pag);
int xfs_ag_resv_init(struct xfs_perag *pag);
int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
......@@ -28,7 +28,7 @@ xfs_ag_resv_rmapbt_alloc(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
struct xfs_alloc_arg args = {0};
struct xfs_alloc_arg args = { NULL };
struct xfs_perag *pag;
args.len = 1;
......
......@@ -2198,12 +2198,12 @@ xfs_agfl_reset(
*/
STATIC void
xfs_defer_agfl_block(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_fsblock_t agbno,
struct xfs_owner_info *oinfo)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_extent_free_item *new; /* new element */
ASSERT(xfs_bmap_free_item_zone != NULL);
......@@ -2216,7 +2216,7 @@ xfs_defer_agfl_block(
trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}
/*
......@@ -2323,16 +2323,8 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
/* defer agfl frees if dfops is provided */
if (tp->t_agfl_dfops) {
xfs_defer_agfl_block(mp, tp->t_agfl_dfops, args->agno,
bno, &targs.oinfo);
} else {
error = xfs_free_agfl_block(tp, args->agno, bno, agbp,
&targs.oinfo);
if (error)
goto out_agbp_relse;
}
/* defer agfl frees */
xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
}
targs.tp = tp;
......@@ -2755,9 +2747,6 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_RMAPi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
spin_lock_init(&pag->pagb_lock);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
}
......@@ -2784,16 +2773,16 @@ xfs_alloc_read_agf(
*/
int /* error */
xfs_alloc_vextent(
xfs_alloc_arg_t *args) /* allocation argument structure */
struct xfs_alloc_arg *args) /* allocation argument structure */
{
xfs_agblock_t agsize; /* allocation group size */
int error;
int flags; /* XFS_ALLOC_FLAG_... locking flags */
xfs_mount_t *mp; /* mount structure pointer */
xfs_agnumber_t sagno; /* starting allocation group number */
xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
xfs_agblock_t agsize; /* allocation group size */
int error;
int flags; /* XFS_ALLOC_FLAG_... locking flags */
struct xfs_mount *mp; /* mount structure pointer */
xfs_agnumber_t sagno; /* starting allocation group number */
xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
type = args->otype = args->type;
......@@ -2914,7 +2903,7 @@ xfs_alloc_vextent(
* locking of AGF, which might cause deadlock.
*/
if (++(args->agno) == mp->m_sb.sb_agcount) {
if (args->firstblock != NULLFSBLOCK)
if (args->tp->t_firstblock != NULLFSBLOCK)
args->agno = sagno;
else
args->agno = 0;
......
......@@ -74,7 +74,6 @@ typedef struct xfs_alloc_arg {
int datatype; /* mask defining data type treatment */
char wasdel; /* set if allocation was prev delayed */
char wasfromfl; /* set if allocation is from freelist */
xfs_fsblock_t firstblock; /* io first block allocated */
struct xfs_owner_info oinfo; /* owner of blocks being allocated */
enum xfs_ag_resv_type resv; /* block reservation to use */
} xfs_alloc_arg_t;
......
......@@ -202,9 +202,7 @@ xfs_attr_set(
struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *leaf_bp = NULL;
struct xfs_da_args args;
struct xfs_defer_ops dfops;
struct xfs_trans_res tres;
xfs_fsblock_t firstblock;
int rsvd = (flags & ATTR_ROOT) != 0;
int error, err2, local;
......@@ -219,8 +217,6 @@ xfs_attr_set(
args.value = value;
args.valuelen = valuelen;
args.firstblock = &firstblock;
args.dfops = &dfops;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
args.total = xfs_attr_calc_size(&args, &local);
......@@ -315,21 +311,18 @@ xfs_attr_set(
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
xfs_defer_init(args.dfops, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
if (error)
goto out_defer_cancel;
goto out;
/*
* Prevent the leaf buffer from being unlocked so that a
* concurrent AIL push cannot grab the half-baked leaf
* buffer and run into problems with the write verifier.
*/
xfs_trans_bhold(args.trans, leaf_bp);
xfs_defer_bjoin(args.dfops, leaf_bp);
xfs_defer_ijoin(args.dfops, dp);
error = xfs_defer_finish(&args.trans, args.dfops);
error = xfs_defer_finish(&args.trans);
if (error)
goto out_defer_cancel;
goto out;
/*
* Commit the leaf transformation. We'll need another (linked)
......@@ -369,8 +362,6 @@ xfs_attr_set(
return error;
out_defer_cancel:
xfs_defer_cancel(&dfops);
out:
if (leaf_bp)
xfs_trans_brelse(args.trans, leaf_bp);
......@@ -392,8 +383,6 @@ xfs_attr_remove(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
struct xfs_defer_ops dfops;
xfs_fsblock_t firstblock;
int error;
XFS_STATS_INC(mp, xs_attr_remove);
......@@ -405,9 +394,6 @@ xfs_attr_remove(
if (error)
return error;
args.firstblock = &firstblock;
args.dfops = &dfops;
/*
* we have no control over the attribute names that userspace passes us
* to remove, so we have to allow the name lookup prior to attribute
......@@ -536,11 +522,12 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
xfs_attr_leaf_addname(xfs_da_args_t *args)
xfs_attr_leaf_addname(
struct xfs_da_args *args)
{
xfs_inode_t *dp;
struct xfs_buf *bp;
int retval, error, forkoff;
struct xfs_inode *dp;
struct xfs_buf *bp;
int retval, error, forkoff;
trace_xfs_attr_leaf_addname(args);
......@@ -598,14 +585,12 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
return error;
/*
* Commit the current trans (including the inode) and start
......@@ -687,15 +672,13 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
return error;
}
/*
......@@ -711,7 +694,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
}
return error;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
xfs_defer_cancel(args->trans);
return error;
}
......@@ -722,11 +705,12 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
xfs_attr_leaf_removename(xfs_da_args_t *args)
xfs_attr_leaf_removename(
struct xfs_da_args *args)
{
xfs_inode_t *dp;
struct xfs_buf *bp;
int error, forkoff;
struct xfs_inode *dp;
struct xfs_buf *bp;
int error, forkoff;
trace_xfs_attr_leaf_removename(args);
......@@ -751,19 +735,17 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
return error;
}
return 0;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
xfs_defer_cancel(args->trans);
return error;
}
......@@ -814,13 +796,14 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
* add a whole extra layer of confusion on top of that.
*/
STATIC int
xfs_attr_node_addname(xfs_da_args_t *args)
xfs_attr_node_addname(
struct xfs_da_args *args)
{
xfs_da_state_t *state;
xfs_da_state_blk_t *blk;
xfs_inode_t *dp;
xfs_mount_t *mp;
int retval, error;
struct xfs_da_state *state;
struct xfs_da_state_blk *blk;
struct xfs_inode *dp;
struct xfs_mount *mp;
int retval, error;
trace_xfs_attr_node_addname(args);
......@@ -879,14 +862,12 @@ xfs_attr_node_addname(xfs_da_args_t *args)
*/
xfs_da_state_free(state);
state = NULL;
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
goto out;
/*
* Commit the node conversion and start the next
......@@ -905,14 +886,12 @@ xfs_attr_node_addname(xfs_da_args_t *args)
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_split(state);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
goto out;
} else {
/*
* Addition succeeded, update Btree hashvals.
......@@ -1003,14 +982,12 @@ xfs_attr_node_addname(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
goto out;
}
/*
......@@ -1037,7 +1014,7 @@ xfs_attr_node_addname(xfs_da_args_t *args)
return error;
return retval;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
xfs_defer_cancel(args->trans);
goto out;
}
......@@ -1049,13 +1026,14 @@ xfs_attr_node_addname(xfs_da_args_t *args)
* the root node (a special case of an intermediate node).
*/
STATIC int
xfs_attr_node_removename(xfs_da_args_t *args)
xfs_attr_node_removename(
struct xfs_da_args *args)
{
xfs_da_state_t *state;
xfs_da_state_blk_t *blk;
xfs_inode_t *dp;
struct xfs_buf *bp;
int retval, error, forkoff;
struct xfs_da_state *state;
struct xfs_da_state_blk *blk;
struct xfs_inode *dp;
struct xfs_buf *bp;
int retval, error, forkoff;
trace_xfs_attr_node_removename(args);
......@@ -1127,14 +1105,12 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
goto out;
/*
* Commit the Btree join operation and start a new trans.
*/
......@@ -1159,15 +1135,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
goto out;
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
goto out;
} else
xfs_trans_brelse(args->trans, bp);
}
......@@ -1177,7 +1151,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
xfs_da_state_free(state);
return error;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
xfs_defer_cancel(args->trans);
goto out;
}
......
......@@ -242,8 +242,9 @@ xfs_attr3_leaf_verify(
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_perag *pag = bp->b_pag;
struct xfs_attr_leaf_entry *entries;
uint16_t end;
int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
......@@ -268,7 +269,7 @@ xfs_attr3_leaf_verify(
* because we may have transitioned an empty shortform attr to a leaf
* if the attr didn't fit in shortform.
*/
if (pag && pag->pagf_init && ichdr.count == 0)
if (!xfs_log_in_recovery(mp) && ichdr.count == 0)
return __this_address;
/*
......@@ -289,6 +290,26 @@ xfs_attr3_leaf_verify(
/* XXX: need to range check rest of attr header values */
/* XXX: hash order check? */
/*
* Quickly check the freemap information. Attribute data has to be
* aligned to 4-byte boundaries, and likewise for the free space.
*/
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
return __this_address;
if (ichdr.freemap[i].base & 0x3)
return __this_address;
if (ichdr.freemap[i].size > mp->m_attr_geo->blksize)
return __this_address;
if (ichdr.freemap[i].size & 0x3)
return __this_address;
end = ichdr.freemap[i].base + ichdr.freemap[i].size;
if (end < ichdr.freemap[i].base)
return __this_address;
if (end > mp->m_attr_geo->blksize)
return __this_address;
}
return NULL;
}
......@@ -506,7 +527,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
{
xfs_attr_sf_hdr_t *hdr;
xfs_inode_t *dp;
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
trace_xfs_attr_sf_create(args);
......@@ -541,7 +562,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
int i, offset, size;
xfs_mount_t *mp;
xfs_inode_t *dp;
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
trace_xfs_attr_sf_add(args);
......@@ -682,7 +703,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
xfs_attr_shortform_t *sf;
xfs_attr_sf_entry_t *sfe;
int i;
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
trace_xfs_attr_sf_lookup(args);
......@@ -747,18 +768,18 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
*/
int
xfs_attr_shortform_to_leaf(
struct xfs_da_args *args,
struct xfs_buf **leaf_bp)
struct xfs_da_args *args,
struct xfs_buf **leaf_bp)
{
xfs_inode_t *dp;
xfs_attr_shortform_t *sf;
xfs_attr_sf_entry_t *sfe;
xfs_da_args_t nargs;
char *tmpbuffer;
int error, i, size;
xfs_dablk_t blkno;
struct xfs_buf *bp;
xfs_ifork_t *ifp;
struct xfs_inode *dp;
struct xfs_attr_shortform *sf;
struct xfs_attr_sf_entry *sfe;
struct xfs_da_args nargs;
char *tmpbuffer;
int error, i, size;
xfs_dablk_t blkno;
struct xfs_buf *bp;
struct xfs_ifork *ifp;
trace_xfs_attr_sf_to_leaf(args);
......@@ -802,8 +823,6 @@ xfs_attr_shortform_to_leaf(
memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
nargs.geo = args->geo;
nargs.firstblock = args->firstblock;
nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
......@@ -1006,8 +1025,6 @@ xfs_attr3_leaf_to_shortform(
memset((char *)&nargs, 0, sizeof(nargs));
nargs.geo = args->geo;
nargs.dp = dp;
nargs.firstblock = args->firstblock;
nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
......@@ -1570,17 +1587,10 @@ xfs_attr3_leaf_rebalance(
*/
swap = 0;
if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
struct xfs_da_state_blk *tmp_blk;
struct xfs_attr3_icleaf_hdr tmp_ichdr;
tmp_blk = blk1;
blk1 = blk2;
blk2 = tmp_blk;
swap(blk1, blk2);
/* struct copies to swap them rather than reconverting */
tmp_ichdr = ichdr1;
ichdr1 = ichdr2;
ichdr2 = tmp_ichdr;
/* swap structures rather than reconverting them */
swap(ichdr1, ichdr2);
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
......
......@@ -480,17 +480,15 @@ xfs_attr_rmtval_set(
* extent and then crash then the block may not contain the
* correct metadata after log recovery occurs.
*/
xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
args->total, &map, &nmap, args->dfops);
blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
&nmap);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
return error;
ASSERT(nmap == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
......@@ -522,7 +520,6 @@ xfs_attr_rmtval_set(
ASSERT(blkcnt > 0);
xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
blkcnt, &map, &nmap,
......@@ -557,8 +554,7 @@ xfs_attr_rmtval_set(
ASSERT(valuelen == 0);
return 0;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
args->trans = NULL;
xfs_defer_cancel(args->trans);
return error;
}
......@@ -626,16 +622,13 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
xfs_defer_init(args->dfops, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK, 1, args->firstblock,
args->dfops, &done);
XFS_BMAPI_ATTRFORK, 1, &done);
if (error)
goto out_defer_cancel;
xfs_defer_ijoin(args->dfops, args->dp);
error = xfs_defer_finish(&args->trans, args->dfops);
error = xfs_defer_finish(&args->trans);
if (error)
goto out_defer_cancel;
return error;
/*
* Close out trans and start the next one in the chain.
......@@ -646,7 +639,6 @@ xfs_attr_rmtval_remove(
}
return 0;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
args->trans = NULL;
xfs_defer_cancel(args->trans);
return error;
}
......@@ -19,8 +19,6 @@ extern kmem_zone_t *xfs_bmap_free_item_zone;
* Argument structure for xfs_bmap_alloc.
*/
struct xfs_bmalloca {
xfs_fsblock_t *firstblock; /* i/o first block allocated */
struct xfs_defer_ops *dfops; /* bmap freelist */
struct xfs_trans *tp; /* transaction pointer */
struct xfs_inode *ip; /* incore inode pointer */
struct xfs_bmbt_irec prev; /* extent before the new one */
......@@ -68,8 +66,6 @@ struct xfs_extent_free_item
#define XFS_BMAPI_METADATA 0x002 /* mapping metadata not user data */
#define XFS_BMAPI_ATTRFORK 0x004 /* use attribute fork not data */
#define XFS_BMAPI_PREALLOC 0x008 /* preallocation op: unwritten space */
#define XFS_BMAPI_IGSTATE 0x010 /* Ignore state - */
/* combine contig. space */
#define XFS_BMAPI_CONTIG 0x020 /* must allocate only one extent */
/*
* unwritten extent conversion - this needs write cache flushing and no additional
......@@ -116,7 +112,6 @@ struct xfs_extent_free_item
{ XFS_BMAPI_METADATA, "METADATA" }, \
{ XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
{ XFS_BMAPI_IGSTATE, "IGSTATE" }, \
{ XFS_BMAPI_CONTIG, "CONTIG" }, \
{ XFS_BMAPI_CONVERT, "CONVERT" }, \
{ XFS_BMAPI_ZERO, "ZERO" }, \
......@@ -189,9 +184,9 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
xfs_fsblock_t bno, xfs_filblks_t len,
struct xfs_owner_info *oinfo, bool skip_discard);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, struct xfs_owner_info *oinfo,
bool skip_discard);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
......@@ -205,17 +200,13 @@ int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
int *nmap, int flags);
int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_fsblock_t *firstblock, xfs_extlen_t total,
struct xfs_bmbt_irec *mval, int *nmap,
struct xfs_defer_ops *dfops);
xfs_extlen_t total, struct xfs_bmbt_irec *mval, int *nmap);
int __xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t *rlen, int flags,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
xfs_extnum_t nexts);
int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops, int *done);
xfs_extnum_t nexts, int *done);
int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
struct xfs_bmbt_irec *del);
......@@ -225,14 +216,12 @@ void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
bool *done);
int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
xfs_fileoff_t shift);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
bool *done, xfs_fileoff_t stop_fsb);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
......@@ -241,13 +230,12 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
static inline void
xfs_bmap_add_free(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
struct xfs_owner_info *oinfo)
{
__xfs_bmap_add_free(mp, dfops, bno, len, oinfo, false);
__xfs_bmap_add_free(tp, bno, len, oinfo, false);
}
enum xfs_bmap_intent_type {
......@@ -263,14 +251,14 @@ struct xfs_bmap_intent {
struct xfs_bmbt_irec bi_bmap;
};
int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_defer_ops *dfops,
struct xfs_inode *ip, enum xfs_bmap_intent_type type,
int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_inode *ip,
enum xfs_bmap_intent_type type, int whichfork,
xfs_fileoff_t startoff, xfs_fsblock_t startblock,
xfs_filblks_t *blockcount, xfs_exntst_t state);
int xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
int xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
int xfs_bmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
struct xfs_bmbt_irec *imap);
int xfs_bmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
struct xfs_bmbt_irec *imap);
static inline int xfs_bmap_fork_to_state(int whichfork)
{
......@@ -289,6 +277,6 @@ xfs_failaddr_t xfs_bmap_validate_extent(struct xfs_inode *ip, int whichfork,
int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock,
struct xfs_defer_ops *dfops, int flags);
int flags);
#endif /* __XFS_BMAP_H__ */
......@@ -175,8 +175,6 @@ xfs_bmbt_dup_cursor(
* Copy the firstblock, dfops, and flags values,
* since init cursor doesn't get them.
*/
new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
new->bc_private.b.dfops = cur->bc_private.b.dfops;
new->bc_private.b.flags = cur->bc_private.b.flags;
return new;
......@@ -187,12 +185,11 @@ xfs_bmbt_update_cursor(
struct xfs_btree_cur *src,
struct xfs_btree_cur *dst)
{
ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
(dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);
dst->bc_private.b.allocated += src->bc_private.b.allocated;
dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;
src->bc_private.b.allocated = 0;
}
......@@ -210,8 +207,7 @@ xfs_bmbt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.fsbno = cur->bc_private.b.firstblock;
args.firstblock = args.fsbno;
args.fsbno = cur->bc_tp->t_firstblock;
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
cur->bc_private.b.whichfork);
......@@ -230,7 +226,7 @@ xfs_bmbt_alloc_block(
* block allocation here and corrupt the filesystem.
*/
args.minleft = args.tp->t_blk_res;
} else if (cur->bc_private.b.dfops->dop_low) {
} else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
......@@ -257,7 +253,7 @@ xfs_bmbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
cur->bc_private.b.dfops->dop_low = true;
cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
}
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
*stat = 0;
......@@ -265,7 +261,7 @@ xfs_bmbt_alloc_block(
}
ASSERT(args.len == 1);
cur->bc_private.b.firstblock = args.fsbno;
cur->bc_tp->t_firstblock = args.fsbno;
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
......@@ -293,7 +289,7 @@ xfs_bmbt_free_block(
struct xfs_owner_info oinfo;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
......@@ -564,8 +560,6 @@ xfs_bmbt_init_cursor(
cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
cur->bc_private.b.ip = ip;
cur->bc_private.b.firstblock = NULLFSBLOCK;
cur->bc_private.b.dfops = NULL;
cur->bc_private.b.allocated = 0;
cur->bc_private.b.flags = 0;
cur->bc_private.b.whichfork = whichfork;
......@@ -645,7 +639,7 @@ xfs_bmbt_change_owner(
cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cur, error);
return error;
}
......
......@@ -7,7 +7,6 @@
#define __XFS_BTREE_H__
struct xfs_buf;
struct xfs_defer_ops;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
......@@ -209,14 +208,11 @@ typedef struct xfs_btree_cur
union {
struct { /* needed for BNO, CNT, INO */
struct xfs_buf *agbp; /* agf/agi buffer pointer */
struct xfs_defer_ops *dfops; /* deferred updates */
xfs_agnumber_t agno; /* ag number */
union xfs_btree_cur_private priv;
} a;
struct { /* needed for BMAP */
struct xfs_inode *ip; /* pointer to our inode */
struct xfs_defer_ops *dfops; /* deferred updates */
xfs_fsblock_t firstblock; /* 1st blk allocated */
int allocated; /* count of alloced */
short forksize; /* fork's inode space */
char whichfork; /* data or attr fork */
......
......@@ -1481,6 +1481,7 @@ xfs_da3_node_lookup_int(
int error;
int retval;
unsigned int expected_level = 0;
uint16_t magic;
struct xfs_inode *dp = state->args->dp;
args = state->args;
......@@ -1505,25 +1506,27 @@ xfs_da3_node_lookup_int(
return error;
}
curr = blk->bp->b_addr;
blk->magic = be16_to_cpu(curr->magic);
magic = be16_to_cpu(curr->magic);
if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
blk->magic == XFS_ATTR3_LEAF_MAGIC) {
if (magic == XFS_ATTR_LEAF_MAGIC ||
magic == XFS_ATTR3_LEAF_MAGIC) {
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
}
if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
blk->magic == XFS_DIR3_LEAFN_MAGIC) {
if (magic == XFS_DIR2_LEAFN_MAGIC ||
magic == XFS_DIR3_LEAFN_MAGIC) {
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
blk->bp, NULL);
break;
}
blk->magic = XFS_DA_NODE_MAGIC;
if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
return -EFSCORRUPTED;
blk->magic = XFS_DA_NODE_MAGIC;
/*
* Search an intermediate node for a match.
......@@ -2059,11 +2062,9 @@ xfs_da_grow_inode_int(
* Try mapping it in one filesystem block.
*/
nmap = 1;
ASSERT(args->firstblock != NULL);
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
args->dfops);
args->total, &map, &nmap);
if (error)
return error;
......@@ -2085,8 +2086,7 @@ xfs_da_grow_inode_int(
c = (int)(*bno + count - b);
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
args->firstblock, args->total,
&mapp[mapi], &nmap, args->dfops);
args->total, &mapp[mapi], &nmap);
if (error)
goto out_free_map;
if (nmap < 1)
......@@ -2375,13 +2375,13 @@ xfs_da3_swap_lastblock(
*/
int
xfs_da_shrink_inode(
xfs_da_args_t *args,
xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf)
struct xfs_da_args *args,
xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf)
{
xfs_inode_t *dp;
int done, error, w, count;
xfs_trans_t *tp;
struct xfs_inode *dp;
int done, error, w, count;
struct xfs_trans *tp;
trace_xfs_da_shrink_inode(args);
......@@ -2395,8 +2395,7 @@ xfs_da_shrink_inode(
* the last block to the place we want to kill.
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w), 0, args->firstblock,
args->dfops, &done);
xfs_bmapi_aflag(w), 0, &done);
if (error == -ENOSPC) {
if (w != XFS_DATA_FORK)
break;
......
......@@ -7,7 +7,6 @@
#ifndef __XFS_DA_BTREE_H__
#define __XFS_DA_BTREE_H__
struct xfs_defer_ops;
struct xfs_inode;
struct xfs_trans;
struct zone;
......@@ -57,8 +56,6 @@ typedef struct xfs_da_args {
xfs_dahash_t hashval; /* hash value of name */
xfs_ino_t inumber; /* input/output inode number */
struct xfs_inode *dp; /* directory inode to manipulate */
xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */
struct xfs_defer_ops *dfops; /* ptr to freelist for bmap_finish */
struct xfs_trans *trans; /* current trans (changes over time) */
xfs_extlen_t total; /* total blocks needed, for 1st bmap */
int whichfork; /* data or attribute fork */
......
......@@ -24,17 +24,6 @@ struct xfs_defer_pending {
/*
* Header for deferred operation list.
*
* dop_low is used by the allocator to activate the lowspace algorithm -
* when free space is running low the extent allocator may choose to
* allocate an extent from an AG without leaving sufficient space for
* a btree split when inserting the new extent. In this case the allocator
* will enable the lowspace algorithm which is supposed to allow further
* allocations (such as btree splits and newroots) to allocate from
* sequential AGs. In order to avoid locking AGs out of order the lowspace
* algorithm will start searching for free space from AG 0. If the correct
* transaction reservations have been made then this algorithm will eventually
* find all the space it needs.
*/
enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_BMAP,
......@@ -45,28 +34,12 @@ enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_MAX,
};
#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
struct xfs_defer_ops {
bool dop_committed; /* did any trans commit? */
bool dop_low; /* alloc in low mode */
struct list_head dop_intake; /* unlogged pending work */
struct list_head dop_pending; /* logged pending work */
/* relog these with each roll */
struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
};
void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
struct list_head *h);
int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop);
void xfs_defer_cancel(struct xfs_defer_ops *dop);
void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
int xfs_defer_finish_noroll(struct xfs_trans **tp);
int xfs_defer_finish(struct xfs_trans **tp);
void xfs_defer_cancel(struct xfs_trans *);
void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
......@@ -74,8 +47,8 @@ struct xfs_defer_op_type {
unsigned int max_items;
void (*abort_intent)(void *);
void *(*create_done)(struct xfs_trans *, void *, unsigned int);
int (*finish_item)(struct xfs_trans *, struct xfs_defer_ops *,
struct list_head *, void *, void **);
int (*finish_item)(struct xfs_trans *, struct list_head *, void *,
void **);
void (*finish_cleanup)(struct xfs_trans *, void *, int);
void (*cancel_item)(struct list_head *);
int (*diff_items)(void *, struct list_head *, struct list_head *);
......
......@@ -239,12 +239,10 @@ xfs_dir_init(
*/
int
xfs_dir_createname(
xfs_trans_t *tp,
xfs_inode_t *dp,
struct xfs_trans *tp,
struct xfs_inode *dp,
struct xfs_name *name,
xfs_ino_t inum, /* new entry inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
......@@ -252,6 +250,7 @@ xfs_dir_createname(
int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
if (inum) {
rval = xfs_dir_ino_validate(tp->t_mountp, inum);
if (rval)
......@@ -270,8 +269,6 @@ xfs_dir_createname(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = inum;
args->dp = dp;
args->firstblock = first;
args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
......@@ -416,17 +413,15 @@ xfs_dir_lookup(
*/
int
xfs_dir_removename(
xfs_trans_t *tp,
xfs_inode_t *dp,
struct xfs_name *name,
xfs_ino_t ino,
xfs_fsblock_t *first, /* bmap's firstblock */
struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
struct xfs_trans *tp,
struct xfs_inode *dp,
struct xfs_name *name,
xfs_ino_t ino,
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
int rval;
int v; /* type-checking value */
struct xfs_da_args *args;
int rval;
int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
......@@ -442,8 +437,6 @@ xfs_dir_removename(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = ino;
args->dp = dp;
args->firstblock = first;
args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
......@@ -478,17 +471,15 @@ xfs_dir_removename(
*/
int
xfs_dir_replace(
xfs_trans_t *tp,
xfs_inode_t *dp,
struct xfs_name *name, /* name of entry to replace */
xfs_ino_t inum, /* new inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
struct xfs_trans *tp,
struct xfs_inode *dp,
struct xfs_name *name, /* name of entry to replace */
xfs_ino_t inum, /* new inode number */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
int rval;
int v; /* type-checking value */
struct xfs_da_args *args;
int rval;
int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
......@@ -507,8 +498,6 @@ xfs_dir_replace(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = inum;
args->dp = dp;
args->firstblock = first;
args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
......@@ -547,7 +536,7 @@ xfs_dir_canenter(
xfs_inode_t *dp,
struct xfs_name *name) /* name of entry to add */
{
return xfs_dir_createname(tp, dp, name, 0, NULL, NULL, 0);
return xfs_dir_createname(tp, dp, name, 0, 0);
}
/*
......@@ -645,17 +634,17 @@ xfs_dir2_isleaf(
*/
int
xfs_dir2_shrink_inode(
xfs_da_args_t *args,
xfs_dir2_db_t db,
struct xfs_buf *bp)
struct xfs_da_args *args,
xfs_dir2_db_t db,
struct xfs_buf *bp)
{
xfs_fileoff_t bno; /* directory file offset */
xfs_dablk_t da; /* directory file offset */
int done; /* bunmap is finished */
xfs_inode_t *dp;
int error;
xfs_mount_t *mp;
xfs_trans_t *tp;
xfs_fileoff_t bno; /* directory file offset */
xfs_dablk_t da; /* directory file offset */
int done; /* bunmap is finished */
struct xfs_inode *dp;
int error;
struct xfs_mount *mp;
struct xfs_trans *tp;
trace_xfs_dir2_shrink_inode(args, db);
......@@ -665,8 +654,7 @@ xfs_dir2_shrink_inode(
da = xfs_dir2_db_to_da(args->geo, db);
/* Unmap the fsblock(s). */
error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0,
args->firstblock, args->dfops, &done);
error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0, &done);
if (error) {
/*
* ENOSPC actually can happen if we're in a removename with no
......
......@@ -9,7 +9,6 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
struct xfs_mount;
......@@ -118,19 +117,16 @@ extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_inode *pdp);
extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_fsblock_t *first,
struct xfs_defer_ops *dfops, xfs_extlen_t tot);
xfs_extlen_t tot);
extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t *inum,
struct xfs_name *ci_name);
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_fsblock_t *first,
struct xfs_defer_ops *dfops, xfs_extlen_t tot);
xfs_extlen_t tot);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_fsblock_t *first,
struct xfs_defer_ops *dfops, xfs_extlen_t tot);
xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name);
......
......@@ -1012,7 +1012,7 @@ xfs_dir2_leafn_rebalance(
int oldstale; /* old count of stale leaves */
#endif
int oldsum; /* old total leaf count */
int swap; /* swapped leaf blocks */
int swap_blocks; /* swapped leaf blocks */
struct xfs_dir2_leaf_entry *ents1;
struct xfs_dir2_leaf_entry *ents2;
struct xfs_dir3_icleaf_hdr hdr1;
......@@ -1023,13 +1023,10 @@ xfs_dir2_leafn_rebalance(
/*
* If the block order is wrong, swap the arguments.
*/
if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) {
xfs_da_state_blk_t *tmp; /* temp for block swap */
swap_blocks = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp);
if (swap_blocks)
swap(blk1, blk2);
tmp = blk1;
blk1 = blk2;
blk2 = tmp;
}
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
......@@ -1093,11 +1090,11 @@ xfs_dir2_leafn_rebalance(
* Mark whether we're inserting into the old or new leaf.
*/
if (hdr1.count < hdr2.count)
state->inleaf = swap;
state->inleaf = swap_blocks;
else if (hdr1.count > hdr2.count)
state->inleaf = !swap;
state->inleaf = !swap_blocks;
else
state->inleaf = swap ^ (blk1->index <= hdr1.count);
state->inleaf = swap_blocks ^ (blk1->index <= hdr1.count);
/*
* Adjust the expected index for insertion.
*/
......
......@@ -53,7 +53,8 @@
#define XFS_ERRTAG_LOG_ITEM_PIN 30
#define XFS_ERRTAG_BUF_LRU_REF 31
#define XFS_ERRTAG_FORCE_SCRUB_REPAIR 32
#define XFS_ERRTAG_MAX 33
#define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33
#define XFS_ERRTAG_MAX 34
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
......@@ -91,5 +92,6 @@
#define XFS_RANDOM_LOG_ITEM_PIN 1
#define XFS_RANDOM_BUF_LRU_REF 2
#define XFS_RANDOM_FORCE_SCRUB_REPAIR 1
#define XFS_RANDOM_FORCE_SUMMARY_RECALC 1
#endif /* __XFS_ERRORTAG_H_ */
......@@ -1838,23 +1838,24 @@ xfs_dialloc(
*/
STATIC void
xfs_difree_inode_chunk(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_agnumber_t agno,
struct xfs_inobt_rec_incore *rec,
struct xfs_defer_ops *dfops)
struct xfs_inobt_rec_incore *rec)
{
xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
int startidx, endidx;
int nextbit;
xfs_agblock_t agbno;
int contigblk;
struct xfs_owner_info oinfo;
struct xfs_mount *mp = tp->t_mountp;
xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
rec->ir_startino);
int startidx, endidx;
int nextbit;
xfs_agblock_t agbno;
int contigblk;
struct xfs_owner_info oinfo;
DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, sagbno),
xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
mp->m_ialloc_blks, &oinfo);
return;
}
......@@ -1898,7 +1899,7 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, agbno),
xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
contigblk, &oinfo);
/* reset range to current bit and carry on... */
......@@ -1915,7 +1916,6 @@ xfs_difree_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agino_t agino,
struct xfs_defer_ops *dfops,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
......@@ -2003,7 +2003,7 @@ xfs_difree_inobt(
goto error0;
}
xfs_difree_inode_chunk(mp, agno, &rec, dfops);
xfs_difree_inode_chunk(tp, agno, &rec);
} else {
xic->deleted = false;
......@@ -2148,7 +2148,6 @@ int
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *xic) /* cluster info if deleted */
{
/* REFERENCED */
......@@ -2200,7 +2199,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
error = xfs_difree_inobt(mp, tp, agbp, agino, dfops, xic, &rec);
error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
if (error)
goto error0;
......@@ -2260,7 +2259,7 @@ xfs_imap_lookup(
}
xfs_trans_brelse(tp, agbp);
xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cur, error);
if (error)
return error;
......@@ -2539,7 +2538,7 @@ xfs_agi_verify(
return __this_address;
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
if (agi->agi_unlinked[i] == NULLAGINO)
if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
continue;
if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
return __this_address;
......
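The one-line change to the agi_unlinked check above is a reminder that on-disk AGI fields are big-endian: either the constant gets converted with cpu_to_be32(), or the field gets converted with be32_to_cpu() as the following line already does. (NULLAGINO is an all-ones value, so the old comparison happened to give the right answer; the conversion makes the types and byte order line up regardless.) A minimal standalone illustration of the pattern, plain userspace C with htonl() standing in for cpu_to_be32():

	#include <arpa/inet.h>		/* htonl(), stand-in for cpu_to_be32() */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ondisk = htonl(42);	/* big-endian, as stored on disk */

		/* wrong on little-endian hosts: raw bytes vs. host-order constant */
		printf("raw compare:       %d\n", ondisk == 42);
		/* right: compare both sides in the same byte order */
		printf("converted compare: %d\n", ondisk == htonl(42));
		return 0;
	}
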
......@@ -82,7 +82,6 @@ int /* error */
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *ifree); /* cluster info if deleted */
/*
......
......@@ -552,6 +552,7 @@ xfs_inobt_max_size(
static int
xfs_inobt_count_blocks(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks)
......@@ -560,14 +561,14 @@ xfs_inobt_count_blocks(
struct xfs_btree_cur *cur;
int error;
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
if (error)
return error;
cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
error = xfs_btree_count_blocks(cur, tree_blocks);
xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
xfs_buf_relse(agbp);
xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
return error;
}
......@@ -578,6 +579,7 @@ xfs_inobt_count_blocks(
int
xfs_finobt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
......@@ -588,7 +590,7 @@ xfs_finobt_calc_reserves(
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
return 0;
error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO, &tree_len);
if (error)
return error;
......
......@@ -60,8 +60,8 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */
int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_extlen_t *ask, xfs_extlen_t *used);
int xfs_finobt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
......
......@@ -14,6 +14,7 @@
#include "xfs_inode_fork.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
/*
......@@ -612,6 +613,19 @@ xfs_iext_realloc_root(
cur->leaf = new;
}
/*
* Increment the sequence counter if we are on a COW fork. This allows
* the writeback code to skip looking for a COW extent if the COW fork
* hasn't changed. We use WRITE_ONCE here to ensure the update to the
* sequence counter is seen before the modifications to the extent
* tree itself take effect.
*/
static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp, int state)
{
if (state & BMAP_COWFORK)
WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
}
void
xfs_iext_insert(
struct xfs_inode *ip,
......@@ -624,6 +638,8 @@ xfs_iext_insert(
struct xfs_iext_leaf *new = NULL;
int nr_entries, i;
xfs_iext_inc_seq(ifp, state);
if (ifp->if_height == 0)
xfs_iext_alloc_root(ifp, cur);
else if (ifp->if_height == 1)
......@@ -864,6 +880,8 @@ xfs_iext_remove(
ASSERT(ifp->if_u1.if_root != NULL);
ASSERT(xfs_iext_valid(ifp, cur));
xfs_iext_inc_seq(ifp, state);
nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
for (i = cur->pos; i < nr_entries; i++)
leaf->recs[i] = leaf->recs[i + 1];
......@@ -970,6 +988,8 @@ xfs_iext_update_extent(
{
struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
xfs_iext_inc_seq(ifp, state);
if (cur->pos == 0) {
struct xfs_bmbt_irec old;
......
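The consumer of this counter sits on the writeback side and is not part of the hunks shown here. As a hedged sketch (the caller below is hypothetical; only if_seq, the i_cowfp COW fork pointer and the READ_ONCE()/WRITE_ONCE() pairing come from this series), writeback can cache a COW mapping together with the sampled counter and revalidate it cheaply on the next page:

	/* hypothetical per-writeback cache */
	unsigned int		cow_seq;	/* if_seq sampled at lookup time */
	struct xfs_bmbt_irec	cow_map;	/* cached COW extent */

	/* slow path: walk the COW fork and remember the counter */
	cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
	/* ... look up the extent covering this offset and fill cow_map ... */

	/* fast path for later pages: unchanged counter means cow_map is still valid */
	if (READ_ONCE(ip->i_cowfp->if_seq) == cow_seq)
		return;		/* skip the COW fork lookup entirely */
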
......@@ -158,7 +158,6 @@ xfs_init_local_fork(
}
ifp->if_bytes = size;
ifp->if_real_bytes = real_size;
ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
ifp->if_flags |= XFS_IFINLINE;
}
......@@ -226,7 +225,6 @@ xfs_iformat_extents(
return -EFSCORRUPTED;
}
ifp->if_real_bytes = 0;
ifp->if_bytes = 0;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
......@@ -271,7 +269,7 @@ xfs_iformat_btree(
{
struct xfs_mount *mp = ip->i_mount;
xfs_bmdr_block_t *dfp;
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
/* REFERENCED */
int nrecs;
int size;
......@@ -317,7 +315,6 @@ xfs_iformat_btree(
ifp->if_flags &= ~XFS_IFEXTENTS;
ifp->if_flags |= XFS_IFBROOT;
ifp->if_real_bytes = 0;
ifp->if_bytes = 0;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
......@@ -350,7 +347,7 @@ xfs_iroot_realloc(
{
struct xfs_mount *mp = ip->i_mount;
int cur_max;
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
struct xfs_btree_block *new_broot;
int new_max;
size_t new_size;
......@@ -471,55 +468,34 @@ xfs_iroot_realloc(
*/
void
xfs_idata_realloc(
xfs_inode_t *ip,
int byte_diff,
int whichfork)
struct xfs_inode *ip,
int byte_diff,
int whichfork)
{
xfs_ifork_t *ifp;
int new_size;
int real_size;
if (byte_diff == 0) {
return;
}
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
int new_size = (int)ifp->if_bytes + byte_diff;
ifp = XFS_IFORK_PTR(ip, whichfork);
new_size = (int)ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0);
ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
if (byte_diff == 0)
return;
if (new_size == 0) {
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
real_size = 0;
} else {
/*
* Stuck with malloc/realloc.
* For inline data, the underlying buffer must be
* a multiple of 4 bytes in size so that it can be
* logged and stay on word boundaries. We enforce
* that here.
*/
real_size = roundup(new_size, 4);
if (ifp->if_u1.if_data == NULL) {
ASSERT(ifp->if_real_bytes == 0);
ifp->if_u1.if_data = kmem_alloc(real_size,
KM_SLEEP | KM_NOFS);
} else {
/*
* Only do the realloc if the underlying size
* is really changing.
*/
if (ifp->if_real_bytes != real_size) {
ifp->if_u1.if_data =
kmem_realloc(ifp->if_u1.if_data,
real_size,
KM_SLEEP | KM_NOFS);
}
}
ifp->if_bytes = 0;
return;
}
ifp->if_real_bytes = real_size;
/*
* For inline data, the underlying buffer must be a multiple of 4 bytes
* in size so that it can be logged and stay on word boundaries.
* We enforce that here.
*/
ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data,
roundup(new_size, 4), KM_SLEEP | KM_NOFS);
ifp->if_bytes = new_size;
ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
}
void
......@@ -527,7 +503,7 @@ xfs_idestroy_fork(
xfs_inode_t *ip,
int whichfork)
{
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
ifp = XFS_IFORK_PTR(ip, whichfork);
if (ifp->if_broot != NULL) {
......@@ -543,17 +519,13 @@ xfs_idestroy_fork(
*/
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
if (ifp->if_u1.if_data != NULL) {
ASSERT(ifp->if_real_bytes != 0);
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
ifp->if_real_bytes = 0;
}
} else if ((ifp->if_flags & XFS_IFEXTENTS) && ifp->if_height) {
xfs_iext_destroy(ifp);
}
ASSERT(ifp->if_real_bytes == 0);
if (whichfork == XFS_ATTR_FORK) {
kmem_zone_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
......@@ -620,7 +592,7 @@ xfs_iflush_fork(
int whichfork)
{
char *cp;
xfs_ifork_t *ifp;
struct xfs_ifork *ifp;
xfs_mount_t *mp;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
......
......@@ -12,9 +12,9 @@ struct xfs_dinode;
/*
* File incore extent information, present for each of data & attr forks.
*/
typedef struct xfs_ifork {
struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
int if_real_bytes; /* bytes allocated in if_u1 */
unsigned int if_seq; /* cow fork mod counter */
struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
......@@ -23,7 +23,7 @@ typedef struct xfs_ifork {
void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
} xfs_ifork_t;
};
/*
* Per-fork incore inode flags.
......
......@@ -77,6 +77,19 @@ static inline uint xlog_get_cycle(char *ptr)
#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
/*
* Log item for unmount records.
*
* The unmount record used to have a string "Unmount filesystem--" in the
* data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
* We just write the magic number now; see xfs_log_unmount_write.
*/
struct xfs_unmount_log_format {
uint16_t magic; /* XLOG_UNMOUNT_TYPE */
uint16_t pad1;
uint32_t pad2; /* may as well make it 64 bits */
};
/* Region types for iovec's i_type */
#define XLOG_REG_TYPE_BFORMAT 1
#define XLOG_REG_TYPE_BCHUNK 2
......
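As an aside on the magic number: 0x556e is just the ASCII bytes 'U' 'n', i.e. the first two characters of the old "Unmount filesystem--" string, and the two pad fields round the record up to 64 bits. A standalone sketch (not kernel code; the struct merely mirrors the one above) that pins both facts down at compile time:

	#include <assert.h>	/* static_assert, C11 */
	#include <stdint.h>

	struct unmount_log_format {		/* mirrors xfs_unmount_log_format */
		uint16_t magic;			/* XLOG_UNMOUNT_TYPE */
		uint16_t pad1;
		uint32_t pad2;
	};

	static_assert(sizeof(struct unmount_log_format) == 8, "8-byte record");
	static_assert(0x556e == ('U' << 8 | 'n'), "magic spells \"Un\"");
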
......@@ -34,11 +34,9 @@ enum xfs_refc_adjust_op {
};
STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
xfs_agblock_t agbno, xfs_extlen_t aglen,
struct xfs_defer_ops *dfops);
xfs_agblock_t agbno, xfs_extlen_t aglen);
STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
xfs_agblock_t agbno, xfs_extlen_t aglen,
struct xfs_defer_ops *dfops);
xfs_agblock_t agbno, xfs_extlen_t aglen);
/*
* Look up the first record less than or equal to [bno, len] in the btree
......@@ -870,7 +868,6 @@ xfs_refcount_adjust_extents(
xfs_agblock_t *agbno,
xfs_extlen_t *aglen,
enum xfs_refc_adjust_op adj,
struct xfs_defer_ops *dfops,
struct xfs_owner_info *oinfo)
{
struct xfs_refcount_irec ext, tmp;
......@@ -925,8 +922,8 @@ xfs_refcount_adjust_extents(
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_private.a.agno,
tmp.rc_startblock);
xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
tmp.rc_blockcount, oinfo);
xfs_bmap_add_free(cur->bc_tp, fsbno,
tmp.rc_blockcount, oinfo);
}
(*agbno) += tmp.rc_blockcount;
......@@ -968,8 +965,8 @@ xfs_refcount_adjust_extents(
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_private.a.agno,
ext.rc_startblock);
xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
ext.rc_blockcount, oinfo);
xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
oinfo);
}
skip:
......@@ -998,7 +995,6 @@ xfs_refcount_adjust(
xfs_agblock_t *new_agbno,
xfs_extlen_t *new_aglen,
enum xfs_refc_adjust_op adj,
struct xfs_defer_ops *dfops,
struct xfs_owner_info *oinfo)
{
bool shape_changed;
......@@ -1043,7 +1039,7 @@ xfs_refcount_adjust(
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
adj, dfops, oinfo);
adj, oinfo);
if (error)
goto out_error;
......@@ -1067,7 +1063,7 @@ xfs_refcount_finish_one_cleanup(
if (rcur == NULL)
return;
agbp = rcur->bc_private.a.agbp;
xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
}
......@@ -1082,7 +1078,6 @@ xfs_refcount_finish_one_cleanup(
int
xfs_refcount_finish_one(
struct xfs_trans *tp,
struct xfs_defer_ops *dfops,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount,
......@@ -1132,7 +1127,7 @@ xfs_refcount_finish_one(
if (!agbp)
return -EFSCORRUPTED;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
if (!rcur) {
error = -ENOMEM;
goto out_cur;
......@@ -1145,23 +1140,23 @@ xfs_refcount_finish_one(
switch (type) {
case XFS_REFCOUNT_INCREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
new_len, XFS_REFCOUNT_ADJUST_INCREASE, NULL);
*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
break;
case XFS_REFCOUNT_DECREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
new_len, XFS_REFCOUNT_ADJUST_DECREASE, NULL);
*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
break;
case XFS_REFCOUNT_ALLOC_COW:
*new_fsb = startblock + blockcount;
*new_len = 0;
error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
error = __xfs_refcount_cow_alloc(rcur, bno, blockcount);
break;
case XFS_REFCOUNT_FREE_COW:
*new_fsb = startblock + blockcount;
*new_len = 0;
error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
error = __xfs_refcount_cow_free(rcur, bno, blockcount);
break;
default:
ASSERT(0);
......@@ -1183,16 +1178,16 @@ xfs_refcount_finish_one(
*/
static int
__xfs_refcount_add(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount)
{
struct xfs_refcount_intent *ri;
trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
type, XFS_FSB_TO_AGBNO(mp, startblock),
trace_xfs_refcount_defer(tp->t_mountp,
XFS_FSB_TO_AGNO(tp->t_mountp, startblock),
type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
blockcount);
ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
......@@ -1202,7 +1197,7 @@ __xfs_refcount_add(
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
return 0;
}
......@@ -1211,14 +1206,13 @@ __xfs_refcount_add(
*/
int
xfs_refcount_increase_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_sb_version_hasreflink(&mp->m_sb))
if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
return 0;
return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
return __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE,
PREV->br_startblock, PREV->br_blockcount);
}
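With dfops folded into the transaction, a caller now passes only the transaction; the intent lands on tp's deferred-ops list via xfs_defer_add(). A hypothetical wrapper illustrating the new calling convention (the wrapper name is made up):
/* Hypothetical caller: schedule a refcount increase for a mapping. */
static int
example_refcount_increase(
	struct xfs_trans	*tp,
	struct xfs_bmbt_irec	*irec)
{
	return xfs_refcount_increase_extent(tp, irec);
}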
......@@ -1227,14 +1221,13 @@ xfs_refcount_increase_extent(
*/
int
xfs_refcount_decrease_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_sb_version_hasreflink(&mp->m_sb))
if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
return 0;
return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
return __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE,
PREV->br_startblock, PREV->br_blockcount);
}
......@@ -1522,8 +1515,7 @@ STATIC int
__xfs_refcount_cow_alloc(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
struct xfs_defer_ops *dfops)
xfs_extlen_t aglen)
{
trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
agbno, aglen);
......@@ -1540,8 +1532,7 @@ STATIC int
__xfs_refcount_cow_free(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
struct xfs_defer_ops *dfops)
xfs_extlen_t aglen)
{
trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
agbno, aglen);
......@@ -1554,47 +1545,45 @@ __xfs_refcount_cow_free(
/* Record a CoW staging extent in the refcount btree. */
int
xfs_refcount_alloc_cow_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
struct xfs_mount *mp = tp->t_mountp;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
fsb, len);
error = __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
if (error)
return error;
/* Add rmap entry */
return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
return xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
}
/* Forget a CoW staging event in the refcount btree. */
int
xfs_refcount_free_cow_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
struct xfs_mount *mp = tp->t_mountp;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
/* Remove rmap entry */
error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
error = xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
if (error)
return error;
return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
fsb, len);
return __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
}
struct xfs_refcount_recovery {
......@@ -1635,7 +1624,6 @@ xfs_refcount_recover_cow_leftovers(
struct list_head debris;
union xfs_btree_irec low;
union xfs_btree_irec high;
struct xfs_defer_ops dfops;
xfs_fsblock_t fsb;
xfs_agblock_t agbno;
int error;
......@@ -1666,7 +1654,7 @@ xfs_refcount_recover_cow_leftovers(
error = -ENOMEM;
goto out_trans;
}
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
/* Find all the leftover CoW staging extents. */
memset(&low, 0, sizeof(low));
......@@ -1675,11 +1663,11 @@ xfs_refcount_recover_cow_leftovers(
high.rc.rc_startblock = -1U;
error = xfs_btree_query_range(cur, &low, &high,
xfs_refcount_recover_extent, &debris);
if (error)
goto out_cursor;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
xfs_trans_cancel(tp);
if (error)
goto out_free;
/* Now iterate the list to free the leftovers */
list_for_each_entry_safe(rr, n, &debris, rr_list) {
......@@ -1691,21 +1679,15 @@ xfs_refcount_recover_cow_leftovers(
trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
/* Free the orphan record */
xfs_defer_init(&dfops, &fsb);
agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
error = xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);
if (error)
goto out_defer;
goto out_trans;
/* Free the block. */
xfs_bmap_add_free(mp, &dfops, fsb,
rr->rr_rrec.rc_blockcount, NULL);
error = xfs_defer_finish(&tp, &dfops);
if (error)
goto out_defer;
xfs_bmap_add_free(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
error = xfs_trans_commit(tp);
if (error)
......@@ -1716,8 +1698,6 @@ xfs_refcount_recover_cow_leftovers(
}
return error;
out_defer:
xfs_defer_cancel(&dfops);
out_trans:
xfs_trans_cancel(tp);
out_free:
......@@ -1727,11 +1707,6 @@ xfs_refcount_recover_cow_leftovers(
kmem_free(rr);
}
return error;
out_cursor:
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
xfs_trans_brelse(tp, agbp);
goto out_trans;
}
/* Is there a record covering a given extent? */
......
......@@ -29,29 +29,26 @@ struct xfs_refcount_intent {
xfs_extlen_t ri_blockcount;
};
extern int xfs_refcount_increase_extent(struct xfs_mount *mp,
struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
extern int xfs_refcount_decrease_extent(struct xfs_mount *mp,
struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
extern int xfs_refcount_increase_extent(struct xfs_trans *tp,
struct xfs_bmbt_irec *irec);
extern int xfs_refcount_decrease_extent(struct xfs_trans *tp,
struct xfs_bmbt_irec *irec);
extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
extern int xfs_refcount_finish_one(struct xfs_trans *tp,
struct xfs_defer_ops *dfops, enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock, xfs_extlen_t blockcount,
xfs_fsblock_t *new_fsb, xfs_extlen_t *new_len,
struct xfs_btree_cur **pcur);
enum xfs_refcount_intent_type type, xfs_fsblock_t startblock,
xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb,
xfs_extlen_t *new_len, struct xfs_btree_cur **pcur);
extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
xfs_extlen_t *flen, bool find_end_of_shared);
extern int xfs_refcount_alloc_cow_extent(struct xfs_mount *mp,
struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
xfs_extlen_t len);
extern int xfs_refcount_free_cow_extent(struct xfs_mount *mp,
struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
xfs_extlen_t len);
extern int xfs_refcount_alloc_cow_extent(struct xfs_trans *tp,
xfs_fsblock_t fsb, xfs_extlen_t len);
extern int xfs_refcount_free_cow_extent(struct xfs_trans *tp,
xfs_fsblock_t fsb, xfs_extlen_t len);
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
xfs_agnumber_t agno);
......
......@@ -27,8 +27,7 @@ xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
cur->bc_private.a.agbp, cur->bc_private.a.agno,
cur->bc_private.a.dfops);
cur->bc_private.a.agbp, cur->bc_private.a.agno);
}
STATIC void
......@@ -71,7 +70,6 @@ xfs_refcountbt_alloc_block(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
xfs_refc_block(args.mp));
args.firstblock = args.fsbno;
xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
args.minlen = args.maxlen = args.prod = 1;
args.resv = XFS_AG_RESV_METADATA;
......@@ -323,8 +321,7 @@ xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agnumber_t agno,
struct xfs_defer_ops *dfops)
xfs_agnumber_t agno)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
......@@ -344,7 +341,6 @@ xfs_refcountbt_init_cursor(
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
cur->bc_private.a.dfops = dfops;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_private.a.priv.refc.nr_ops = 0;
......@@ -408,6 +404,7 @@ xfs_refcountbt_max_size(
int
xfs_refcountbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
......@@ -422,14 +419,14 @@ xfs_refcountbt_calc_reserves(
return 0;
error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
xfs_buf_relse(agbp);
xfs_trans_brelse(tp, agbp);
*ask += xfs_refcountbt_max_size(mp, agblocks);
*used += tree_len;
......
......@@ -44,8 +44,8 @@ struct xfs_mount;
((index) - 1) * sizeof(xfs_refcount_ptr_t)))
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
struct xfs_defer_ops *dfops);
struct xfs_trans *tp, struct xfs_buf *agbp,
xfs_agnumber_t agno);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
......@@ -55,6 +55,7 @@ extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
xfs_extlen_t *used);
#endif /* __XFS_REFCOUNT_BTREE_H__ */
......@@ -670,14 +670,8 @@ xfs_rmap_free(
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
if (error)
goto out_error;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return 0;
out_error:
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
xfs_btree_del_cursor(cur, error);
return error;
}
......@@ -753,19 +747,19 @@ xfs_rmap_map(
&have_lt);
if (error)
goto out_error;
XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
if (error)
goto out_error;
XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
if (have_lt) {
error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
if (error)
goto out_error;
XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
have_lt = 0;
if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
have_lt = 0;
}
XFS_WANT_CORRUPTED_GOTO(mp,
have_lt == 0 ||
......@@ -912,14 +906,8 @@ xfs_rmap_alloc(
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
if (error)
goto out_error;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return 0;
out_error:
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
xfs_btree_del_cursor(cur, error);
return error;
}
......@@ -2156,7 +2144,7 @@ xfs_rmap_finish_one_cleanup(
if (rcur == NULL)
return;
agbp = rcur->bc_private.a.agbp;
xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
}
......@@ -2289,18 +2277,18 @@ xfs_rmap_update_is_needed(
*/
static int
__xfs_rmap_add(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
uint64_t owner,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
struct xfs_rmap_intent *ri;
struct xfs_rmap_intent *ri;
trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
trace_xfs_rmap_defer(tp->t_mountp,
XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
type,
XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
owner, whichfork,
bmap->br_startoff,
bmap->br_blockcount,
......@@ -2313,23 +2301,22 @@ __xfs_rmap_add(
ri->ri_whichfork = whichfork;
ri->ri_bmap = *bmap;
xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
return 0;
}
/* Map an extent into a file. */
int
xfs_rmap_map_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_rmap_update_is_needed(mp, whichfork))
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return 0;
return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_MAP_SHARED : XFS_RMAP_MAP, ip->i_ino,
whichfork, PREV);
}
......@@ -2337,25 +2324,29 @@ xfs_rmap_map_extent(
/* Unmap an extent out of a file. */
int
xfs_rmap_unmap_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_rmap_update_is_needed(mp, whichfork))
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return 0;
return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_UNMAP_SHARED : XFS_RMAP_UNMAP, ip->i_ino,
whichfork, PREV);
}
/* Convert a data fork extent from unwritten to real or vice versa. */
/*
* Convert a data fork extent from unwritten to real or vice versa.
*
* Note that tp can be NULL here as no transaction is used for COW fork
* unwritten conversion.
*/
int
xfs_rmap_convert_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
......@@ -2363,7 +2354,7 @@ xfs_rmap_convert_extent(
if (!xfs_rmap_update_is_needed(mp, whichfork))
return 0;
return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_CONVERT_SHARED : XFS_RMAP_CONVERT, ip->i_ino,
whichfork, PREV);
}
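As the comment above notes, a NULL transaction is tolerated for COW fork unwritten conversion because no rmap update is queued for that fork. A hypothetical call site (the wrapper name is made up):
/* Hypothetical: COW fork conversion may pass a NULL transaction. */
static int
example_convert_cow(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap)
{
	return xfs_rmap_convert_extent(mp, NULL, ip, XFS_COW_FORK, imap);
}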
......@@ -2371,8 +2362,7 @@ xfs_rmap_convert_extent(
/* Schedule the creation of an rmap for non-file data. */
int
xfs_rmap_alloc_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
......@@ -2380,23 +2370,21 @@ xfs_rmap_alloc_extent(
{
struct xfs_bmbt_irec bmap;
if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return 0;
bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
XFS_DATA_FORK, &bmap);
return __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
}
/* Schedule the deletion of an rmap for non-file data. */
int
xfs_rmap_free_extent(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
......@@ -2404,16 +2392,15 @@ xfs_rmap_free_extent(
{
struct xfs_bmbt_irec bmap;
if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return 0;
bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
XFS_DATA_FORK, &bmap);
return __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
}
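A hypothetical caller for the transaction-based non-file-data helpers; the owner chosen here (XFS_RMAP_OWN_AG) and the wrapper name are only for illustration:
/* Hypothetical: queue removal of an AG-owned rmap record. */
static int
example_free_ag_rmap(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	return xfs_rmap_free_extent(tp, agno, agbno, len, XFS_RMAP_OWN_AG);
}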
/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
......
......@@ -185,21 +185,17 @@ struct xfs_rmap_intent {
};
/* functions for updating the rmapbt based on bmbt map/unmap operations */
int xfs_rmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
int xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, struct xfs_bmbt_irec *imap);
int xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, struct xfs_bmbt_irec *imap);
int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
int xfs_rmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
int xfs_rmap_alloc_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
uint64_t owner);
int xfs_rmap_free_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
uint64_t owner);
int xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
int xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
......
......@@ -554,6 +554,7 @@ xfs_rmapbt_max_size(
int
xfs_rmapbt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
......@@ -567,14 +568,14 @@ xfs_rmapbt_calc_reserves(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
xfs_buf_relse(agbp);
xfs_trans_brelse(tp, agbp);
/* Reserve 1% of the AG or enough for 1 block per record. */
*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
......
......@@ -51,7 +51,7 @@ extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
#endif /* __XFS_RMAP_BTREE_H__ */
......@@ -64,6 +64,18 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
#define XFS_TRANS_NO_WRITECOUNT 0x40 /* do not elevate SB writecount */
#define XFS_TRANS_NOFS 0x80 /* pass KM_NOFS to kmem_alloc */
/*
* LOWMODE is used by the allocator to activate the lowspace algorithm - when
* free space is running low the extent allocator may choose to allocate an
* extent from an AG without leaving sufficient space for a btree split when
* inserting the new extent. In this case the allocator will enable the
* lowspace algorithm which is supposed to allow further allocations (such as
* btree splits and newroots) to allocate from sequential AGs. In order to
* avoid locking AGs out of order the lowspace algorithm will start searching
* for free space from AG 0. If the correct transaction reservations have been
* made then this algorithm will eventually find all the space it needs.
*/
#define XFS_TRANS_LOWMODE 0x100 /* allocate in low space mode */
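A minimal sketch of testing this flag, assuming it is stored in tp->t_flags alongside the other XFS_TRANS_* flags (the helper name is made up):
/* Illustrative: is this transaction already in low space mode? */
static inline bool
example_trans_in_lowmode(
	struct xfs_trans	*tp)
{
	return (tp->t_flags & XFS_TRANS_LOWMODE) != 0;
}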
/*
* Field values for xfs_trans_mod_sb.
......
......@@ -171,3 +171,37 @@ xfs_verify_rtbno(
{
return rtbno < mp->m_sb.sb_rblocks;
}
/* Calculate the range of valid icount values. */
static void
xfs_icount_range(
struct xfs_mount *mp,
unsigned long long *min,
unsigned long long *max)
{
unsigned long long nr_inos = 0;
xfs_agnumber_t agno;
/* root, rtbitmap, rtsum all live in the first chunk */
*min = XFS_INODES_PER_CHUNK;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
xfs_agino_t first, last;
xfs_agino_range(mp, agno, &first, &last);
nr_inos += last - first + 1;
}
*max = nr_inos;
}
/* Sanity-checking of inode counts. */
bool
xfs_verify_icount(
struct xfs_mount *mp,
unsigned long long icount)
{
unsigned long long min, max;
xfs_icount_range(mp, &min, &max);
return icount >= min && icount <= max;
}
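A hypothetical mount-time sanity check built on the new helper; only xfs_verify_icount() comes from this hunk, the wrapper name is made up:
/* Hypothetical: reject an obviously bad inode count from the superblock. */
static bool
example_icount_ok(
	struct xfs_mount	*mp)
{
	return xfs_verify_icount(mp, mp->m_sb.sb_icount);
}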
......@@ -165,5 +165,6 @@ bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
#endif /* __XFS_TYPES_H__ */
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
*/
#ifndef __XFS_SCRUB_BITMAP_H__
#define __XFS_SCRUB_BITMAP_H__
struct xfs_bitmap_range {
struct list_head list;
uint64_t start;
uint64_t len;
};
struct xfs_bitmap {
struct list_head list;
};
void xfs_bitmap_init(struct xfs_bitmap *bitmap);
void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
struct xfs_btree_cur *cur);
int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
struct xfs_btree_cur *cur);
#endif /* __XFS_SCRUB_BITMAP_H__ */
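A usage sketch of the bitmap API declared above; the ranges being recorded are arbitrary and the function exists only for illustration:
/* Illustrative usage: record two ranges, walk them, then tear down. */
static int
example_bitmap_walk(void)
{
	struct xfs_bitmap	bitmap;
	struct xfs_bitmap_range	*bex, *n;
	int			error;

	xfs_bitmap_init(&bitmap);
	error = xfs_bitmap_set(&bitmap, 0, 16);
	if (!error)
		error = xfs_bitmap_set(&bitmap, 64, 8);
	for_each_xfs_bitmap_extent(bex, n, &bitmap)
		;	/* bex->start and bex->len describe one range */
	xfs_bitmap_destroy(&bitmap);
	return error;
}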
......@@ -9,44 +9,43 @@
/* btree scrub */
/* Check for btree operation errors. */
bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc,
bool xchk_btree_process_error(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree xref operation errors. */
bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level,
int *error);
bool xchk_btree_xref_process_error(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree corruption. */
void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc,
void xchk_btree_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
/* Check for btree xref discrepancies. */
void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
void xchk_btree_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
struct xfs_scrub_btree;
typedef int (*xfs_scrub_btree_rec_fn)(
struct xfs_scrub_btree *bs,
struct xchk_btree;
typedef int (*xchk_btree_rec_fn)(
struct xchk_btree *bs,
union xfs_btree_rec *rec);
struct xfs_scrub_btree {
struct xchk_btree {
/* caller-provided scrub state */
struct xfs_scrub_context *sc;
struct xfs_btree_cur *cur;
xfs_scrub_btree_rec_fn scrub_rec;
struct xfs_owner_info *oinfo;
void *private;
struct xfs_scrub *sc;
struct xfs_btree_cur *cur;
xchk_btree_rec_fn scrub_rec;
struct xfs_owner_info *oinfo;
void *private;
/* internal scrub state */
union xfs_btree_rec lastrec;
bool firstrec;
union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
bool firstkey[XFS_BTREE_MAXLEVELS];
struct list_head to_check;
union xfs_btree_rec lastrec;
bool firstrec;
union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
bool firstkey[XFS_BTREE_MAXLEVELS];
struct list_head to_check;
};
int xfs_scrub_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
xfs_scrub_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo, void *private);
int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
xchk_btree_rec_fn scrub_fn, struct xfs_owner_info *oinfo,
void *private);
#endif /* __XFS_SCRUB_BTREE_H__ */
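A hypothetical record callback matching the renamed xchk_btree_rec_fn type; the specific field checked (an allocation record's block count) is only an example. Such a callback would be handed to xchk_btree() together with a cursor and owner info:
/* Hypothetical xchk_btree_rec_fn: flag zero-length records as corrupt. */
static int
example_scrub_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	if (be32_to_cpu(rec->alloc.ar_blockcount) == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	return 0;
}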
......@@ -22,9 +22,9 @@
/* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t
xfs_scrub_btree_cur_fsbno(
struct xfs_btree_cur *cur,
int level)
xchk_btree_cur_fsbno(
struct xfs_btree_cur *cur,
int level)
{
if (level < cur->bc_nlevels && cur->bc_bufs[level])
return XFS_DADDR_TO_FSB(cur->bc_mp, cur->bc_bufs[level]->b_bn);
......
......@@ -8,7 +8,6 @@
#ifdef CONFIG_XFS_DEBUG
#define DEBUG 1
#define XFS_BUF_LOCK_TRACKING 1
#endif
#ifdef CONFIG_XFS_ASSERT_FATAL
......
......@@ -26,6 +26,7 @@
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_dir2.h"
#include "xfs_defer.h"
/*
* Look at all the extents for this logical region,
......