Commit c0927a7a authored by Linus Torvalds

Merge tag 'xfs-6.3-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull moar xfs updates from Darrick Wong:
 "This contains a fix for a deadlock in the allocator. It continues the
  slow march towards being able to offline AGs, and it refactors the
  interface to the xfs allocator to be less indirection happy.

  Summary:

   - Fix a deadlock in the free space allocator due to the AG-walking
     algorithm forgetting to follow AG-order locking rules

   - Make the inode allocator prefer existing free inodes instead of
     failing to allocate new inode chunks when free space is low

   - Set minleft correctly when setting allocator parameters for bmap
     changes

   - Fix uninitialized variable access in the getfsmap code

   - Make a distinction between active and passive per-AG structure
     references. For now, active references are taken to perform some
     work in an AG on behalf of a high-level operation; passive
     references are used by lower-level code to finish operations
     started by other threads. Eventually this will become part of
     online shrink (a brief usage sketch follows this summary)

   - Split out all the different allocator strategies into separate
     functions to move us away from the design antipattern of filling
     out a huge structure for various subtly different operations and
     issuing a single multiplexing function call

   - Various cleanups in the filestreams allocator code, which we might
     very well want to deprecate instead of continuing to maintain it

   - Fix a bug with the agi rotor code that was introduced earlier in
     this series"

* tag 'xfs-6.3-merge-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (44 commits)
  xfs: restore old agirotor behavior
  xfs: fix uninitialized variable access
  xfs: refactor the filestreams allocator pick functions
  xfs: return a referenced perag from filestreams allocator
  xfs: pass perag to filestreams tracing
  xfs: use for_each_perag_wrap in xfs_filestream_pick_ag
  xfs: track an active perag reference in filestreams
  xfs: factor out MRU hit case in xfs_filestream_select_ag
  xfs: remove xfs_filestream_select_ag() longest extent check
  xfs: merge new filestream AG selection into xfs_filestream_select_ag()
  xfs: merge filestream AG lookup into xfs_filestream_select_ag()
  xfs: move xfs_bmap_btalloc_filestreams() to xfs_filestreams.c
  xfs: use xfs_bmap_longest_free_extent() in filestreams
  xfs: get rid of notinit from xfs_bmap_longest_free_extent
  xfs: factor out filestreams from xfs_bmap_btalloc_nullfb
  xfs: convert trim to use for_each_perag_range
  xfs: convert xfs_alloc_vextent_iterate_ags() to use perag walker
  xfs: move the minimum agno checks into xfs_alloc_vextent_check_args
  xfs: fold xfs_alloc_ag_vextent() into callers
  xfs: move allocation accounting to xfs_alloc_vextent_set_fsbno()
  ...
parents 1899946a 6e2985c9
...@@ -44,16 +44,15 @@ xfs_perag_get( ...@@ -44,16 +44,15 @@ xfs_perag_get(
xfs_agnumber_t agno) xfs_agnumber_t agno)
{ {
struct xfs_perag *pag; struct xfs_perag *pag;
int ref = 0;
rcu_read_lock(); rcu_read_lock();
pag = radix_tree_lookup(&mp->m_perag_tree, agno); pag = radix_tree_lookup(&mp->m_perag_tree, agno);
if (pag) { if (pag) {
trace_xfs_perag_get(pag, _RET_IP_);
ASSERT(atomic_read(&pag->pag_ref) >= 0); ASSERT(atomic_read(&pag->pag_ref) >= 0);
ref = atomic_inc_return(&pag->pag_ref); atomic_inc(&pag->pag_ref);
} }
rcu_read_unlock(); rcu_read_unlock();
trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
return pag; return pag;
} }
...@@ -68,7 +67,6 @@ xfs_perag_get_tag( ...@@ -68,7 +67,6 @@ xfs_perag_get_tag(
{ {
struct xfs_perag *pag; struct xfs_perag *pag;
int found; int found;
int ref;
rcu_read_lock(); rcu_read_lock();
found = radix_tree_gang_lookup_tag(&mp->m_perag_tree, found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
...@@ -77,9 +75,9 @@ xfs_perag_get_tag( ...@@ -77,9 +75,9 @@ xfs_perag_get_tag(
rcu_read_unlock(); rcu_read_unlock();
return NULL; return NULL;
} }
ref = atomic_inc_return(&pag->pag_ref); trace_xfs_perag_get_tag(pag, _RET_IP_);
atomic_inc(&pag->pag_ref);
rcu_read_unlock(); rcu_read_unlock();
trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
return pag; return pag;
} }
...@@ -87,11 +85,68 @@ void ...@@ -87,11 +85,68 @@ void
xfs_perag_put( xfs_perag_put(
struct xfs_perag *pag) struct xfs_perag *pag)
{ {
int ref; trace_xfs_perag_put(pag, _RET_IP_);
ASSERT(atomic_read(&pag->pag_ref) > 0); ASSERT(atomic_read(&pag->pag_ref) > 0);
ref = atomic_dec_return(&pag->pag_ref); atomic_dec(&pag->pag_ref);
trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); }
/*
* Active references for perag structures. This is for short term access to the
* per ag structures for walking trees or accessing state. If an AG is being
* shrunk or is offline, then this will fail to find that AG and return NULL
* instead.
*/
struct xfs_perag *
xfs_perag_grab(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
struct xfs_perag *pag;
rcu_read_lock();
pag = radix_tree_lookup(&mp->m_perag_tree, agno);
if (pag) {
trace_xfs_perag_grab(pag, _RET_IP_);
if (!atomic_inc_not_zero(&pag->pag_active_ref))
pag = NULL;
}
rcu_read_unlock();
return pag;
}
/*
* search from @first to find the next perag with the given tag set.
*/
struct xfs_perag *
xfs_perag_grab_tag(
struct xfs_mount *mp,
xfs_agnumber_t first,
int tag)
{
struct xfs_perag *pag;
int found;
rcu_read_lock();
found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
(void **)&pag, first, 1, tag);
if (found <= 0) {
rcu_read_unlock();
return NULL;
}
trace_xfs_perag_grab_tag(pag, _RET_IP_);
if (!atomic_inc_not_zero(&pag->pag_active_ref))
pag = NULL;
rcu_read_unlock();
return pag;
}
void
xfs_perag_rele(
struct xfs_perag *pag)
{
trace_xfs_perag_rele(pag, _RET_IP_);
if (atomic_dec_and_test(&pag->pag_active_ref))
wake_up(&pag->pag_active_wq);
} }
/* /*
...@@ -196,6 +251,10 @@ xfs_free_perag( ...@@ -196,6 +251,10 @@ xfs_free_perag(
cancel_delayed_work_sync(&pag->pag_blockgc_work); cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_buf_hash_destroy(pag); xfs_buf_hash_destroy(pag);
/* drop the mount's active reference */
xfs_perag_rele(pag);
XFS_IS_CORRUPT(pag->pag_mount,
atomic_read(&pag->pag_active_ref) != 0);
call_rcu(&pag->rcu_head, __xfs_free_perag); call_rcu(&pag->rcu_head, __xfs_free_perag);
} }
} }
...@@ -314,6 +373,7 @@ xfs_initialize_perag( ...@@ -314,6 +373,7 @@ xfs_initialize_perag(
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker); INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
init_waitqueue_head(&pag->pagb_wait); init_waitqueue_head(&pag->pagb_wait);
init_waitqueue_head(&pag->pag_active_wq);
pag->pagb_count = 0; pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT; pag->pagb_tree = RB_ROOT;
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
...@@ -322,6 +382,9 @@ xfs_initialize_perag( ...@@ -322,6 +382,9 @@ xfs_initialize_perag(
if (error) if (error)
goto out_remove_pag; goto out_remove_pag;
/* Active ref owned by mount indicates AG is online. */
atomic_set(&pag->pag_active_ref, 1);
/* first new pag is fully initialized */ /* first new pag is fully initialized */
if (first_initialised == NULLAGNUMBER) if (first_initialised == NULLAGNUMBER)
first_initialised = index; first_initialised = index;
...@@ -824,7 +887,7 @@ xfs_ag_shrink_space( ...@@ -824,7 +887,7 @@ xfs_ag_shrink_space(
struct xfs_alloc_arg args = { struct xfs_alloc_arg args = {
.tp = *tpp, .tp = *tpp,
.mp = mp, .mp = mp,
.type = XFS_ALLOCTYPE_THIS_BNO, .pag = pag,
.minlen = delta, .minlen = delta,
.maxlen = delta, .maxlen = delta,
.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE, .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE,
...@@ -856,14 +919,11 @@ xfs_ag_shrink_space( ...@@ -856,14 +919,11 @@ xfs_ag_shrink_space(
if (delta >= aglen) if (delta >= aglen)
return -EINVAL; return -EINVAL;
args.fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta);
/* /*
* Make sure that the last inode cluster cannot overlap with the new * Make sure that the last inode cluster cannot overlap with the new
* end of the AG, even if it's sparse. * end of the AG, even if it's sparse.
*/ */
error = xfs_ialloc_check_shrink(*tpp, pag->pag_agno, agibp, error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
aglen - delta);
if (error) if (error)
return error; return error;
...@@ -876,7 +936,8 @@ xfs_ag_shrink_space( ...@@ -876,7 +936,8 @@ xfs_ag_shrink_space(
return error; return error;
/* internal log shouldn't also show up in the free space btrees */ /* internal log shouldn't also show up in the free space btrees */
error = xfs_alloc_vextent(&args); error = xfs_alloc_vextent_exact_bno(&args,
XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
if (!error && args.agbno == NULLAGBLOCK) if (!error && args.agbno == NULLAGBLOCK)
error = -ENOSPC; error = -ENOSPC;
......
...@@ -32,14 +32,12 @@ struct xfs_ag_resv { ...@@ -32,14 +32,12 @@ struct xfs_ag_resv {
struct xfs_perag { struct xfs_perag {
struct xfs_mount *pag_mount; /* owner filesystem */ struct xfs_mount *pag_mount; /* owner filesystem */
xfs_agnumber_t pag_agno; /* AG this structure belongs to */ xfs_agnumber_t pag_agno; /* AG this structure belongs to */
atomic_t pag_ref; /* perag reference count */ atomic_t pag_ref; /* passive reference count */
char pagf_init; /* this agf's entry is initialized */ atomic_t pag_active_ref; /* active reference count */
char pagi_init; /* this agi's entry is initialized */ wait_queue_head_t pag_active_wq;/* woken active_ref falls to zero */
char pagf_metadata; /* the agf is preferred to be metadata */ unsigned long pag_opstate;
char pagi_inodeok; /* The agi is ok for inodes */
uint8_t pagf_levels[XFS_BTNUM_AGF]; uint8_t pagf_levels[XFS_BTNUM_AGF];
/* # of levels in bno & cnt btree */ /* # of levels in bno & cnt btree */
bool pagf_agflreset; /* agfl requires reset before use */
uint32_t pagf_flcount; /* count of blocks in freelist */ uint32_t pagf_flcount; /* count of blocks in freelist */
xfs_extlen_t pagf_freeblks; /* total free blocks */ xfs_extlen_t pagf_freeblks; /* total free blocks */
xfs_extlen_t pagf_longest; /* longest free space */ xfs_extlen_t pagf_longest; /* longest free space */
...@@ -106,16 +104,44 @@ struct xfs_perag { ...@@ -106,16 +104,44 @@ struct xfs_perag {
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
}; };
/*
* Per-AG operational state. These are atomic flag bits.
*/
#define XFS_AGSTATE_AGF_INIT 0
#define XFS_AGSTATE_AGI_INIT 1
#define XFS_AGSTATE_PREFERS_METADATA 2
#define XFS_AGSTATE_ALLOWS_INODES 3
#define XFS_AGSTATE_AGFL_NEEDS_RESET 4
#define __XFS_AG_OPSTATE(name, NAME) \
static inline bool xfs_perag_ ## name (struct xfs_perag *pag) \
{ \
return test_bit(XFS_AGSTATE_ ## NAME, &pag->pag_opstate); \
}
__XFS_AG_OPSTATE(initialised_agf, AGF_INIT)
__XFS_AG_OPSTATE(initialised_agi, AGI_INIT)
__XFS_AG_OPSTATE(prefers_metadata, PREFERS_METADATA)
__XFS_AG_OPSTATE(allows_inodes, ALLOWS_INODES)
__XFS_AG_OPSTATE(agfl_needs_reset, AGFL_NEEDS_RESET)
int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount, int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
xfs_rfsblock_t dcount, xfs_agnumber_t *maxagi); xfs_rfsblock_t dcount, xfs_agnumber_t *maxagi);
int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno); int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
void xfs_free_perag(struct xfs_mount *mp); void xfs_free_perag(struct xfs_mount *mp);
/* Passive AG references */
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno); struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno, struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
unsigned int tag); unsigned int tag);
void xfs_perag_put(struct xfs_perag *pag); void xfs_perag_put(struct xfs_perag *pag);
/* Active AG references */
struct xfs_perag *xfs_perag_grab(struct xfs_mount *, xfs_agnumber_t);
struct xfs_perag *xfs_perag_grab_tag(struct xfs_mount *, xfs_agnumber_t,
int tag);
void xfs_perag_rele(struct xfs_perag *pag);
/* /*
* Per-ag geometry infomation and validation * Per-ag geometry infomation and validation
*/ */
...@@ -193,31 +219,86 @@ xfs_perag_next( ...@@ -193,31 +219,86 @@ xfs_perag_next(
struct xfs_mount *mp = pag->pag_mount; struct xfs_mount *mp = pag->pag_mount;
*agno = pag->pag_agno + 1; *agno = pag->pag_agno + 1;
xfs_perag_put(pag); xfs_perag_rele(pag);
if (*agno > end_agno) while (*agno <= end_agno) {
pag = xfs_perag_grab(mp, *agno);
if (pag)
return pag;
(*agno)++;
}
return NULL; return NULL;
return xfs_perag_get(mp, *agno);
} }
#define for_each_perag_range(mp, agno, end_agno, pag) \ #define for_each_perag_range(mp, agno, end_agno, pag) \
for ((pag) = xfs_perag_get((mp), (agno)); \ for ((pag) = xfs_perag_grab((mp), (agno)); \
(pag) != NULL; \ (pag) != NULL; \
(pag) = xfs_perag_next((pag), &(agno), (end_agno))) (pag) = xfs_perag_next((pag), &(agno), (end_agno)))
#define for_each_perag_from(mp, agno, pag) \ #define for_each_perag_from(mp, agno, pag) \
for_each_perag_range((mp), (agno), (mp)->m_sb.sb_agcount - 1, (pag)) for_each_perag_range((mp), (agno), (mp)->m_sb.sb_agcount - 1, (pag))
#define for_each_perag(mp, agno, pag) \ #define for_each_perag(mp, agno, pag) \
(agno) = 0; \ (agno) = 0; \
for_each_perag_from((mp), (agno), (pag)) for_each_perag_from((mp), (agno), (pag))
#define for_each_perag_tag(mp, agno, pag, tag) \ #define for_each_perag_tag(mp, agno, pag, tag) \
for ((agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \ for ((agno) = 0, (pag) = xfs_perag_grab_tag((mp), 0, (tag)); \
(pag) != NULL; \ (pag) != NULL; \
(agno) = (pag)->pag_agno + 1, \ (agno) = (pag)->pag_agno + 1, \
xfs_perag_put(pag), \ xfs_perag_rele(pag), \
(pag) = xfs_perag_get_tag((mp), (agno), (tag))) (pag) = xfs_perag_grab_tag((mp), (agno), (tag)))
static inline struct xfs_perag *
xfs_perag_next_wrap(
struct xfs_perag *pag,
xfs_agnumber_t *agno,
xfs_agnumber_t stop_agno,
xfs_agnumber_t restart_agno,
xfs_agnumber_t wrap_agno)
{
struct xfs_mount *mp = pag->pag_mount;
*agno = pag->pag_agno + 1;
xfs_perag_rele(pag);
while (*agno != stop_agno) {
if (*agno >= wrap_agno) {
if (restart_agno >= stop_agno)
break;
*agno = restart_agno;
}
pag = xfs_perag_grab(mp, *agno);
if (pag)
return pag;
(*agno)++;
}
return NULL;
}
/*
* Iterate all AGs from start_agno through wrap_agno, then restart_agno through
* (start_agno - 1).
*/
#define for_each_perag_wrap_range(mp, start_agno, restart_agno, wrap_agno, agno, pag) \
for ((agno) = (start_agno), (pag) = xfs_perag_grab((mp), (agno)); \
(pag) != NULL; \
(pag) = xfs_perag_next_wrap((pag), &(agno), (start_agno), \
(restart_agno), (wrap_agno)))
/*
* Iterate all AGs from start_agno through wrap_agno, then 0 through
* (start_agno - 1).
*/
#define for_each_perag_wrap_at(mp, start_agno, wrap_agno, agno, pag) \
for_each_perag_wrap_range((mp), (start_agno), 0, (wrap_agno), (agno), (pag))
/*
* Iterate all AGs from start_agno through to the end of the filesystem, then 0
* through (start_agno - 1).
*/
#define for_each_perag_wrap(mp, start_agno, agno, pag) \
for_each_perag_wrap_at((mp), (start_agno), (mp)->m_sb.sb_agcount, \
(agno), (pag))
struct aghdr_init_data { struct aghdr_init_data {
/* per ag data */ /* per ag data */
......
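
A minimal sketch of the new wrapping iterator and pag_opstate helpers
declared above (pick_inode_ag() and its policy are made up for
illustration; for_each_perag_wrap() and xfs_perag_allows_inodes() are
the interfaces this header now exports). The walk starts at the
preferred AG, wraps back around to AG 0, and skips AGs that cannot be
grabbed; breaking out of the loop keeps the active reference, which the
caller must eventually drop with xfs_perag_rele():

        static struct xfs_perag *
        pick_inode_ag(struct xfs_mount *mp, xfs_agnumber_t start_agno)
        {
                struct xfs_perag        *pag;
                xfs_agnumber_t          agno;

                for_each_perag_wrap(mp, start_agno, agno, pag) {
                        /* Opstate bit helpers replace the old pagi_inodeok flag. */
                        if (xfs_perag_allows_inodes(pag))
                                break;  /* keep the active ref for the caller */
                }
                return pag;             /* NULL if no AG qualified */
        }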
...@@ -264,7 +264,7 @@ xfs_ag_resv_init( ...@@ -264,7 +264,7 @@ xfs_ag_resv_init(
if (error) if (error)
goto out; goto out;
error = xfs_finobt_calc_reserves(mp, tp, pag, &ask, &used); error = xfs_finobt_calc_reserves(pag, tp, &ask, &used);
if (error) if (error)
goto out; goto out;
......
...@@ -16,25 +16,6 @@ extern struct workqueue_struct *xfs_alloc_wq; ...@@ -16,25 +16,6 @@ extern struct workqueue_struct *xfs_alloc_wq;
unsigned int xfs_agfl_size(struct xfs_mount *mp); unsigned int xfs_agfl_size(struct xfs_mount *mp);
/*
* Freespace allocation types. Argument to xfs_alloc_[v]extent.
*/
#define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */
#define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */
#define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */
#define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */
#define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */
/* this should become an enum again when the tracing code is fixed */
typedef unsigned int xfs_alloctype_t;
#define XFS_ALLOC_TYPES \
{ XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
{ XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
{ XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
{ XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
{ XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
/* /*
* Flags for xfs_alloc_fix_freelist. * Flags for xfs_alloc_fix_freelist.
*/ */
...@@ -68,8 +49,6 @@ typedef struct xfs_alloc_arg { ...@@ -68,8 +49,6 @@ typedef struct xfs_alloc_arg {
xfs_agblock_t min_agbno; /* set an agbno range for NEAR allocs */ xfs_agblock_t min_agbno; /* set an agbno range for NEAR allocs */
xfs_agblock_t max_agbno; /* ... */ xfs_agblock_t max_agbno; /* ... */
xfs_extlen_t len; /* output: actual size of extent */ xfs_extlen_t len; /* output: actual size of extent */
xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */
xfs_alloctype_t otype; /* original allocation type */
int datatype; /* mask defining data type treatment */ int datatype; /* mask defining data type treatment */
char wasdel; /* set if allocation was prev delayed */ char wasdel; /* set if allocation was prev delayed */
char wasfromfl; /* set if allocation is from freelist */ char wasfromfl; /* set if allocation is from freelist */
...@@ -118,11 +97,43 @@ xfs_alloc_log_agf( ...@@ -118,11 +97,43 @@ xfs_alloc_log_agf(
uint32_t fields);/* mask of fields to be logged (XFS_AGF_...) */ uint32_t fields);/* mask of fields to be logged (XFS_AGF_...) */
/* /*
* Allocate an extent (variable-size). * Allocate an extent anywhere in the specific AG given. If there is no
* space matching the requirements in that AG, then the allocation will fail.
*/ */
int /* error */ int xfs_alloc_vextent_this_ag(struct xfs_alloc_arg *args, xfs_agnumber_t agno);
xfs_alloc_vextent(
xfs_alloc_arg_t *args); /* allocation argument structure */ /*
* Allocate an extent as close to the target as possible. If there are not
* viable candidates in the AG, then fail the allocation.
*/
int xfs_alloc_vextent_near_bno(struct xfs_alloc_arg *args,
xfs_fsblock_t target);
/*
* Allocate an extent exactly at the target given. If this is not possible
* then the allocation fails.
*/
int xfs_alloc_vextent_exact_bno(struct xfs_alloc_arg *args,
xfs_fsblock_t target);
/*
* Best effort full filesystem allocation scan.
*
* Locality aware allocation will be attempted in the initial AG, but on failure
* non-localised attempts will be made. The AGs are constrained by previous
* allocations in the current transaction. Two passes will be made - the first
* non-blocking, the second blocking.
*/
int xfs_alloc_vextent_start_ag(struct xfs_alloc_arg *args,
xfs_fsblock_t target);
/*
* Iterate from the AG indicated from args->fsbno through to the end of the
* filesystem attempting blocking allocation. This is for use in last
* resort allocation attempts when everything else has failed.
*/
int xfs_alloc_vextent_first_ag(struct xfs_alloc_arg *args,
xfs_fsblock_t target);
/* /*
* Free an extent. * Free an extent.
......
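
A before/after sketch of the conversion these declarations imply (the
values are illustrative; mp, pag, agbno and a zeroed struct
xfs_alloc_arg args are assumed to be set up as in the btree block
allocation hunks further down):

        /* Before: one multiplexed entry point, steered by args.type. */
        args.type  = XFS_ALLOCTYPE_NEAR_BNO;
        args.fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno);
        error = xfs_alloc_vextent(&args);

        /* After: the strategy is the function and the target is an argument. */
        args.pag = pag;
        error = xfs_alloc_vextent_near_bno(&args,
                        XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno));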
...@@ -315,7 +315,7 @@ xfs_allocbt_verify( ...@@ -315,7 +315,7 @@ xfs_allocbt_verify(
level = be16_to_cpu(block->bb_level); level = be16_to_cpu(block->bb_level);
if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC)) if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
btnum = XFS_BTNUM_CNTi; btnum = XFS_BTNUM_CNTi;
if (pag && pag->pagf_init) { if (pag && xfs_perag_initialised_agf(pag)) {
if (level >= pag->pagf_levels[btnum]) if (level >= pag->pagf_levels[btnum])
return __this_address; return __this_address;
} else if (level >= mp->m_alloc_maxlevels) } else if (level >= mp->m_alloc_maxlevels)
......
...@@ -12,6 +12,7 @@ struct xfs_ifork; ...@@ -12,6 +12,7 @@ struct xfs_ifork;
struct xfs_inode; struct xfs_inode;
struct xfs_mount; struct xfs_mount;
struct xfs_trans; struct xfs_trans;
struct xfs_alloc_arg;
/* /*
* Argument structure for xfs_bmap_alloc. * Argument structure for xfs_bmap_alloc.
...@@ -168,6 +169,8 @@ static inline bool xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec) ...@@ -168,6 +169,8 @@ static inline bool xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec)
#define xfs_valid_startblock(ip, startblock) \ #define xfs_valid_startblock(ip, startblock) \
((startblock) != 0 || XFS_IS_REALTIME_INODE(ip)) ((startblock) != 0 || XFS_IS_REALTIME_INODE(ip))
int xfs_bmap_longest_free_extent(struct xfs_perag *pag,
struct xfs_trans *tp, xfs_extlen_t *blen);
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len); xfs_filblks_t len);
unsigned int xfs_bmap_compute_attr_offset(struct xfs_mount *mp); unsigned int xfs_bmap_compute_attr_offset(struct xfs_mount *mp);
...@@ -220,6 +223,10 @@ int xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp, ...@@ -220,6 +223,10 @@ int xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork, struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp, struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
struct xfs_bmbt_irec *new, int *logflagsp); struct xfs_bmbt_irec *new, int *logflagsp);
xfs_extlen_t xfs_bmapi_minleft(struct xfs_trans *tp, struct xfs_inode *ip,
int fork);
int xfs_bmap_btalloc_low_space(struct xfs_bmalloca *ap,
struct xfs_alloc_arg *args);
enum xfs_bmap_intent_type { enum xfs_bmap_intent_type {
XFS_BMAP_MAP = 1, XFS_BMAP_MAP = 1,
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "xfs_quota.h" #include "xfs_quota.h"
#include "xfs_trace.h" #include "xfs_trace.h"
#include "xfs_rmap.h" #include "xfs_rmap.h"
#include "xfs_ag.h"
static struct kmem_cache *xfs_bmbt_cur_cache; static struct kmem_cache *xfs_bmbt_cur_cache;
...@@ -184,11 +185,11 @@ xfs_bmbt_update_cursor( ...@@ -184,11 +185,11 @@ xfs_bmbt_update_cursor(
struct xfs_btree_cur *src, struct xfs_btree_cur *src,
struct xfs_btree_cur *dst) struct xfs_btree_cur *dst)
{ {
ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) || ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
(dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME)); (dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));
dst->bc_ino.allocated += src->bc_ino.allocated; dst->bc_ino.allocated += src->bc_ino.allocated;
dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock; dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;
src->bc_ino.allocated = 0; src->bc_ino.allocated = 0;
} }
...@@ -200,46 +201,32 @@ xfs_bmbt_alloc_block( ...@@ -200,46 +201,32 @@ xfs_bmbt_alloc_block(
union xfs_btree_ptr *new, union xfs_btree_ptr *new,
int *stat) int *stat)
{ {
xfs_alloc_arg_t args; /* block allocation args */ struct xfs_alloc_arg args;
int error; /* error return value */ int error;
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp; args.tp = cur->bc_tp;
args.mp = cur->bc_mp; args.mp = cur->bc_mp;
args.fsbno = cur->bc_tp->t_firstblock;
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino, xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
cur->bc_ino.whichfork); cur->bc_ino.whichfork);
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
if (!args.wasdel && args.tp->t_blk_res == 0)
return -ENOSPC;
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
args.type = XFS_ALLOCTYPE_START_BNO;
/* /*
* Make sure there is sufficient room left in the AG to * If we are coming here from something like unwritten extent
* complete a full tree split for an extent insert. If * conversion, there has been no data extent allocation already done, so
* we are converting the middle part of an extent then * we have to ensure that we attempt to locate the entire set of bmbt
* we may need space for two tree splits. * allocations in the same AG, as xfs_bmapi_write() would have reserved.
*
* We are relying on the caller to make the correct block
* reservation for this operation to succeed. If the
* reservation amount is insufficient then we may fail a
* block allocation here and corrupt the filesystem.
*/ */
args.minleft = args.tp->t_blk_res; if (cur->bc_tp->t_highest_agno == NULLAGNUMBER)
} else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) { args.minleft = xfs_bmapi_minleft(cur->bc_tp, cur->bc_ino.ip,
args.type = XFS_ALLOCTYPE_START_BNO; cur->bc_ino.whichfork);
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
}
args.minlen = args.maxlen = args.prod = 1; error = xfs_alloc_vextent_start_ag(&args, be64_to_cpu(start->l));
args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
if (!args.wasdel && args.tp->t_blk_res == 0) {
error = -ENOSPC;
goto error0;
}
error = xfs_alloc_vextent(&args);
if (error) if (error)
goto error0; return error;
if (args.fsbno == NULLFSBLOCK && args.minleft) { if (args.fsbno == NULLFSBLOCK && args.minleft) {
/* /*
...@@ -247,11 +234,10 @@ xfs_bmbt_alloc_block( ...@@ -247,11 +234,10 @@ xfs_bmbt_alloc_block(
* a full btree split. Try again and if * a full btree split. Try again and if
* successful activate the lowspace algorithm. * successful activate the lowspace algorithm.
*/ */
args.fsbno = 0; args.minleft = 0;
args.type = XFS_ALLOCTYPE_FIRST_AG; error = xfs_alloc_vextent_start_ag(&args, 0);
error = xfs_alloc_vextent(&args);
if (error) if (error)
goto error0; return error;
cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE; cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
} }
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
...@@ -260,7 +246,6 @@ xfs_bmbt_alloc_block( ...@@ -260,7 +246,6 @@ xfs_bmbt_alloc_block(
} }
ASSERT(args.len == 1); ASSERT(args.len == 1);
cur->bc_tp->t_firstblock = args.fsbno;
cur->bc_ino.allocated++; cur->bc_ino.allocated++;
cur->bc_ino.ip->i_nblocks++; cur->bc_ino.ip->i_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE); xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
...@@ -271,9 +256,6 @@ xfs_bmbt_alloc_block( ...@@ -271,9 +256,6 @@ xfs_bmbt_alloc_block(
*stat = 1; *stat = 1;
return 0; return 0;
error0:
return error;
} }
STATIC int STATIC int
......
...@@ -2943,7 +2943,7 @@ xfs_btree_split( ...@@ -2943,7 +2943,7 @@ xfs_btree_split(
DECLARE_COMPLETION_ONSTACK(done); DECLARE_COMPLETION_ONSTACK(done);
if (cur->bc_btnum != XFS_BTNUM_BMAP || if (cur->bc_btnum != XFS_BTNUM_BMAP ||
cur->bc_tp->t_firstblock == NULLFSBLOCK) cur->bc_tp->t_highest_agno == NULLAGNUMBER)
return __xfs_btree_split(cur, level, ptrp, key, curp, stat); return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
args.cur = cur; args.cur = cur;
......
...@@ -12,6 +12,7 @@ struct xfs_imap; ...@@ -12,6 +12,7 @@ struct xfs_imap;
struct xfs_mount; struct xfs_mount;
struct xfs_trans; struct xfs_trans;
struct xfs_btree_cur; struct xfs_btree_cur;
struct xfs_perag;
/* Move inodes in clusters of this size */ /* Move inodes in clusters of this size */
#define XFS_INODE_BIG_CLUSTER_SIZE 8192 #define XFS_INODE_BIG_CLUSTER_SIZE 8192
...@@ -47,7 +48,7 @@ int xfs_difree(struct xfs_trans *tp, struct xfs_perag *pag, ...@@ -47,7 +48,7 @@ int xfs_difree(struct xfs_trans *tp, struct xfs_perag *pag,
*/ */
int int
xfs_imap( xfs_imap(
struct xfs_mount *mp, /* file system mount structure */ struct xfs_perag *pag,
struct xfs_trans *tp, /* transaction pointer */ struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t ino, /* inode to locate */ xfs_ino_t ino, /* inode to locate */
struct xfs_imap *imap, /* location map structure */ struct xfs_imap *imap, /* location map structure */
...@@ -106,7 +107,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp); ...@@ -106,7 +107,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_setup_geometry(struct xfs_mount *mp); void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit); xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno, int xfs_ialloc_check_shrink(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_buf *agibp, xfs_agblock_t new_length); struct xfs_buf *agibp, xfs_agblock_t new_length);
#endif /* __XFS_IALLOC_H__ */ #endif /* __XFS_IALLOC_H__ */
...@@ -36,8 +36,8 @@ STATIC struct xfs_btree_cur * ...@@ -36,8 +36,8 @@ STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor( xfs_inobt_dup_cursor(
struct xfs_btree_cur *cur) struct xfs_btree_cur *cur)
{ {
return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp, return xfs_inobt_init_cursor(cur->bc_ag.pag, cur->bc_tp,
cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum); cur->bc_ag.agbp, cur->bc_btnum);
} }
STATIC void STATIC void
...@@ -103,15 +103,15 @@ __xfs_inobt_alloc_block( ...@@ -103,15 +103,15 @@ __xfs_inobt_alloc_block(
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp; args.tp = cur->bc_tp;
args.mp = cur->bc_mp; args.mp = cur->bc_mp;
args.pag = cur->bc_ag.pag;
args.oinfo = XFS_RMAP_OINFO_INOBT; args.oinfo = XFS_RMAP_OINFO_INOBT;
args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);
args.minlen = 1; args.minlen = 1;
args.maxlen = 1; args.maxlen = 1;
args.prod = 1; args.prod = 1;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.resv = resv; args.resv = resv;
error = xfs_alloc_vextent(&args); error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno, sbno));
if (error) if (error)
return error; return error;
...@@ -291,8 +291,8 @@ xfs_inobt_verify( ...@@ -291,8 +291,8 @@ xfs_inobt_verify(
* Similarly, during log recovery we will have a perag structure * Similarly, during log recovery we will have a perag structure
* attached, but the agi information will not yet have been initialised * attached, but the agi information will not yet have been initialised
* from the on disk AGI. We don't currently use any of this information, * from the on disk AGI. We don't currently use any of this information,
* but beware of the landmine (i.e. need to check pag->pagi_init) if we * but beware of the landmine (i.e. need to check
* ever do. * xfs_perag_initialised_agi(pag)) if we ever do.
*/ */
if (xfs_has_crc(mp)) { if (xfs_has_crc(mp)) {
fa = xfs_btree_sblock_v5hdr_verify(bp); fa = xfs_btree_sblock_v5hdr_verify(bp);
...@@ -427,11 +427,11 @@ static const struct xfs_btree_ops xfs_finobt_ops = { ...@@ -427,11 +427,11 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
*/ */
static struct xfs_btree_cur * static struct xfs_btree_cur *
xfs_inobt_init_common( xfs_inobt_init_common(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
struct xfs_perag *pag, struct xfs_perag *pag,
struct xfs_trans *tp, /* transaction pointer */
xfs_btnum_t btnum) /* ialloc or free ino btree */ xfs_btnum_t btnum) /* ialloc or free ino btree */
{ {
struct xfs_mount *mp = pag->pag_mount;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, cur = xfs_btree_alloc_cursor(mp, tp, btnum,
...@@ -456,16 +456,15 @@ xfs_inobt_init_common( ...@@ -456,16 +456,15 @@ xfs_inobt_init_common(
/* Create an inode btree cursor. */ /* Create an inode btree cursor. */
struct xfs_btree_cur * struct xfs_btree_cur *
xfs_inobt_init_cursor( xfs_inobt_init_cursor(
struct xfs_mount *mp, struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_trans *tp,
struct xfs_buf *agbp, struct xfs_buf *agbp,
struct xfs_perag *pag,
xfs_btnum_t btnum) xfs_btnum_t btnum)
{ {
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
struct xfs_agi *agi = agbp->b_addr; struct xfs_agi *agi = agbp->b_addr;
cur = xfs_inobt_init_common(mp, tp, pag, btnum); cur = xfs_inobt_init_common(pag, tp, btnum);
if (btnum == XFS_BTNUM_INO) if (btnum == XFS_BTNUM_INO)
cur->bc_nlevels = be32_to_cpu(agi->agi_level); cur->bc_nlevels = be32_to_cpu(agi->agi_level);
else else
...@@ -477,14 +476,13 @@ xfs_inobt_init_cursor( ...@@ -477,14 +476,13 @@ xfs_inobt_init_cursor(
/* Create an inode btree cursor with a fake root for staging. */ /* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur * struct xfs_btree_cur *
xfs_inobt_stage_cursor( xfs_inobt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
struct xfs_perag *pag, struct xfs_perag *pag,
struct xbtree_afakeroot *afake,
xfs_btnum_t btnum) xfs_btnum_t btnum)
{ {
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
cur = xfs_inobt_init_common(mp, NULL, pag, btnum); cur = xfs_inobt_init_common(pag, NULL, btnum);
xfs_btree_stage_afakeroot(cur, afake); xfs_btree_stage_afakeroot(cur, afake);
return cur; return cur;
} }
...@@ -708,9 +706,8 @@ xfs_inobt_max_size( ...@@ -708,9 +706,8 @@ xfs_inobt_max_size(
/* Read AGI and create inobt cursor. */ /* Read AGI and create inobt cursor. */
int int
xfs_inobt_cur( xfs_inobt_cur(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_perag *pag, struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_btnum_t which, xfs_btnum_t which,
struct xfs_btree_cur **curpp, struct xfs_btree_cur **curpp,
struct xfs_buf **agi_bpp) struct xfs_buf **agi_bpp)
...@@ -725,16 +722,15 @@ xfs_inobt_cur( ...@@ -725,16 +722,15 @@ xfs_inobt_cur(
if (error) if (error)
return error; return error;
cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which); cur = xfs_inobt_init_cursor(pag, tp, *agi_bpp, which);
*curpp = cur; *curpp = cur;
return 0; return 0;
} }
static int static int
xfs_inobt_count_blocks( xfs_inobt_count_blocks(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_perag *pag, struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_btnum_t btnum, xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks) xfs_extlen_t *tree_blocks)
{ {
...@@ -742,7 +738,7 @@ xfs_inobt_count_blocks( ...@@ -742,7 +738,7 @@ xfs_inobt_count_blocks(
struct xfs_btree_cur *cur = NULL; struct xfs_btree_cur *cur = NULL;
int error; int error;
error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp); error = xfs_inobt_cur(pag, tp, btnum, &cur, &agbp);
if (error) if (error)
return error; return error;
...@@ -779,22 +775,21 @@ xfs_finobt_read_blocks( ...@@ -779,22 +775,21 @@ xfs_finobt_read_blocks(
*/ */
int int
xfs_finobt_calc_reserves( xfs_finobt_calc_reserves(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_perag *pag, struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_extlen_t *ask, xfs_extlen_t *ask,
xfs_extlen_t *used) xfs_extlen_t *used)
{ {
xfs_extlen_t tree_len = 0; xfs_extlen_t tree_len = 0;
int error; int error;
if (!xfs_has_finobt(mp)) if (!xfs_has_finobt(pag->pag_mount))
return 0; return 0;
if (xfs_has_inobtcounts(mp)) if (xfs_has_inobtcounts(pag->pag_mount))
error = xfs_finobt_read_blocks(pag, tp, &tree_len); error = xfs_finobt_read_blocks(pag, tp, &tree_len);
else else
error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO, error = xfs_inobt_count_blocks(pag, tp, XFS_BTNUM_FINO,
&tree_len); &tree_len);
if (error) if (error)
return error; return error;
......
...@@ -46,12 +46,10 @@ struct xfs_perag; ...@@ -46,12 +46,10 @@ struct xfs_perag;
(maxrecs) * sizeof(xfs_inobt_key_t) + \ (maxrecs) * sizeof(xfs_inobt_key_t) + \
((index) - 1) * sizeof(xfs_inobt_ptr_t))) ((index) - 1) * sizeof(xfs_inobt_ptr_t)))
extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *mp, extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_perag *pag,
struct xfs_trans *tp, struct xfs_buf *agbp, struct xfs_trans *tp, struct xfs_buf *agbp, xfs_btnum_t btnum);
struct xfs_perag *pag, xfs_btnum_t btnum); struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_perag *pag,
struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_mount *mp, struct xbtree_afakeroot *afake, xfs_btnum_t btnum);
struct xbtree_afakeroot *afake, struct xfs_perag *pag,
xfs_btnum_t btnum);
extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int); extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
/* ir_holemask to inode allocation bitmap conversion */ /* ir_holemask to inode allocation bitmap conversion */
...@@ -64,13 +62,13 @@ int xfs_inobt_rec_check_count(struct xfs_mount *, ...@@ -64,13 +62,13 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#define xfs_inobt_rec_check_count(mp, rec) 0 #define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */ #endif /* DEBUG */
int xfs_finobt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp, int xfs_finobt_calc_reserves(struct xfs_perag *perag, struct xfs_trans *tp,
struct xfs_perag *pag, xfs_extlen_t *ask, xfs_extlen_t *used); xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp, extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len); unsigned long long len);
int xfs_inobt_cur(struct xfs_mount *mp, struct xfs_trans *tp, int xfs_inobt_cur(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_perag *pag, xfs_btnum_t btnum, xfs_btnum_t btnum, struct xfs_btree_cur **curpp,
struct xfs_btree_cur **curpp, struct xfs_buf **agi_bpp); struct xfs_buf **agi_bpp);
void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur, void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
struct xfs_trans *tp, struct xfs_buf *agbp); struct xfs_trans *tp, struct xfs_buf *agbp);
......
...@@ -67,14 +67,14 @@ xfs_refcountbt_alloc_block( ...@@ -67,14 +67,14 @@ xfs_refcountbt_alloc_block(
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp; args.tp = cur->bc_tp;
args.mp = cur->bc_mp; args.mp = cur->bc_mp;
args.type = XFS_ALLOCTYPE_NEAR_BNO; args.pag = cur->bc_ag.pag;
args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
xfs_refc_block(args.mp));
args.oinfo = XFS_RMAP_OINFO_REFC; args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1; args.minlen = args.maxlen = args.prod = 1;
args.resv = XFS_AG_RESV_METADATA; args.resv = XFS_AG_RESV_METADATA;
error = xfs_alloc_vextent(&args); error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, args.pag->pag_agno,
xfs_refc_block(args.mp)));
if (error) if (error)
goto out_error; goto out_error;
trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno, trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
...@@ -227,7 +227,7 @@ xfs_refcountbt_verify( ...@@ -227,7 +227,7 @@ xfs_refcountbt_verify(
return fa; return fa;
level = be16_to_cpu(block->bb_level); level = be16_to_cpu(block->bb_level);
if (pag && pag->pagf_init) { if (pag && xfs_perag_initialised_agf(pag)) {
if (level >= pag->pagf_refcount_level) if (level >= pag->pagf_refcount_level)
return __this_address; return __this_address;
} else if (level >= mp->m_refc_maxlevels) } else if (level >= mp->m_refc_maxlevels)
......
...@@ -313,7 +313,7 @@ xfs_rmapbt_verify( ...@@ -313,7 +313,7 @@ xfs_rmapbt_verify(
return fa; return fa;
level = be16_to_cpu(block->bb_level); level = be16_to_cpu(block->bb_level);
if (pag && pag->pagf_init) { if (pag && xfs_perag_initialised_agf(pag)) {
if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi]) if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
return __this_address; return __this_address;
} else if (level >= mp->m_rmap_maxlevels) } else if (level >= mp->m_rmap_maxlevels)
......
...@@ -909,7 +909,8 @@ xfs_sb_mount_common( ...@@ -909,7 +909,8 @@ xfs_sb_mount_common(
struct xfs_mount *mp, struct xfs_mount *mp,
struct xfs_sb *sbp) struct xfs_sb *sbp)
{ {
mp->m_agfrotor = mp->m_agirotor = 0; mp->m_agfrotor = 0;
atomic_set(&mp->m_agirotor, 0);
mp->m_maxagi = mp->m_sb.sb_agcount; mp->m_maxagi = mp->m_sb.sb_agcount;
mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
......
...@@ -191,14 +191,15 @@ xrep_agf_init_header( ...@@ -191,14 +191,15 @@ xrep_agf_init_header(
struct xfs_agf *old_agf) struct xfs_agf *old_agf)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_perag *pag = sc->sa.pag;
struct xfs_agf *agf = agf_bp->b_addr; struct xfs_agf *agf = agf_bp->b_addr;
memcpy(old_agf, agf, sizeof(*old_agf)); memcpy(old_agf, agf, sizeof(*old_agf));
memset(agf, 0, BBTOB(agf_bp->b_length)); memset(agf, 0, BBTOB(agf_bp->b_length));
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION); agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
agf->agf_seqno = cpu_to_be32(sc->sa.pag->pag_agno); agf->agf_seqno = cpu_to_be32(pag->pag_agno);
agf->agf_length = cpu_to_be32(sc->sa.pag->block_count); agf->agf_length = cpu_to_be32(pag->block_count);
agf->agf_flfirst = old_agf->agf_flfirst; agf->agf_flfirst = old_agf->agf_flfirst;
agf->agf_fllast = old_agf->agf_fllast; agf->agf_fllast = old_agf->agf_fllast;
agf->agf_flcount = old_agf->agf_flcount; agf->agf_flcount = old_agf->agf_flcount;
...@@ -206,8 +207,8 @@ xrep_agf_init_header( ...@@ -206,8 +207,8 @@ xrep_agf_init_header(
uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid); uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
/* Mark the incore AGF data stale until we're done fixing things. */ /* Mark the incore AGF data stale until we're done fixing things. */
ASSERT(sc->sa.pag->pagf_init); ASSERT(xfs_perag_initialised_agf(pag));
sc->sa.pag->pagf_init = 0; clear_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
} }
/* Set btree root information in an AGF. */ /* Set btree root information in an AGF. */
...@@ -333,7 +334,7 @@ xrep_agf_commit_new( ...@@ -333,7 +334,7 @@ xrep_agf_commit_new(
pag->pagf_levels[XFS_BTNUM_RMAPi] = pag->pagf_levels[XFS_BTNUM_RMAPi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]); be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level); pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
pag->pagf_init = 1; set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
return 0; return 0;
} }
...@@ -434,7 +435,7 @@ xrep_agf( ...@@ -434,7 +435,7 @@ xrep_agf(
out_revert: out_revert:
/* Mark the incore AGF state stale and revert the AGF. */ /* Mark the incore AGF state stale and revert the AGF. */
sc->sa.pag->pagf_init = 0; clear_bit(XFS_AGSTATE_AGF_INIT, &sc->sa.pag->pag_opstate);
memcpy(agf, &old_agf, sizeof(old_agf)); memcpy(agf, &old_agf, sizeof(old_agf));
return error; return error;
} }
...@@ -618,7 +619,7 @@ xrep_agfl_update_agf( ...@@ -618,7 +619,7 @@ xrep_agfl_update_agf(
xfs_force_summary_recalc(sc->mp); xfs_force_summary_recalc(sc->mp);
/* Update the AGF counters. */ /* Update the AGF counters. */
if (sc->sa.pag->pagf_init) if (xfs_perag_initialised_agf(sc->sa.pag))
sc->sa.pag->pagf_flcount = flcount; sc->sa.pag->pagf_flcount = flcount;
agf->agf_flfirst = cpu_to_be32(0); agf->agf_flfirst = cpu_to_be32(0);
agf->agf_flcount = cpu_to_be32(flcount); agf->agf_flcount = cpu_to_be32(flcount);
...@@ -822,14 +823,15 @@ xrep_agi_init_header( ...@@ -822,14 +823,15 @@ xrep_agi_init_header(
struct xfs_agi *old_agi) struct xfs_agi *old_agi)
{ {
struct xfs_agi *agi = agi_bp->b_addr; struct xfs_agi *agi = agi_bp->b_addr;
struct xfs_perag *pag = sc->sa.pag;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
memcpy(old_agi, agi, sizeof(*old_agi)); memcpy(old_agi, agi, sizeof(*old_agi));
memset(agi, 0, BBTOB(agi_bp->b_length)); memset(agi, 0, BBTOB(agi_bp->b_length));
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION); agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
agi->agi_seqno = cpu_to_be32(sc->sa.pag->pag_agno); agi->agi_seqno = cpu_to_be32(pag->pag_agno);
agi->agi_length = cpu_to_be32(sc->sa.pag->block_count); agi->agi_length = cpu_to_be32(pag->block_count);
agi->agi_newino = cpu_to_be32(NULLAGINO); agi->agi_newino = cpu_to_be32(NULLAGINO);
agi->agi_dirino = cpu_to_be32(NULLAGINO); agi->agi_dirino = cpu_to_be32(NULLAGINO);
if (xfs_has_crc(mp)) if (xfs_has_crc(mp))
...@@ -840,8 +842,8 @@ xrep_agi_init_header( ...@@ -840,8 +842,8 @@ xrep_agi_init_header(
sizeof(agi->agi_unlinked)); sizeof(agi->agi_unlinked));
/* Mark the incore AGF data stale until we're done fixing things. */ /* Mark the incore AGF data stale until we're done fixing things. */
ASSERT(sc->sa.pag->pagi_init); ASSERT(xfs_perag_initialised_agi(pag));
sc->sa.pag->pagi_init = 0; clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
} }
/* Set btree root information in an AGI. */ /* Set btree root information in an AGI. */
...@@ -873,8 +875,7 @@ xrep_agi_calc_from_btrees( ...@@ -873,8 +875,7 @@ xrep_agi_calc_from_btrees(
xfs_agino_t freecount; xfs_agino_t freecount;
int error; int error;
cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp, XFS_BTNUM_INO);
sc->sa.pag, XFS_BTNUM_INO);
error = xfs_ialloc_count_inodes(cur, &count, &freecount); error = xfs_ialloc_count_inodes(cur, &count, &freecount);
if (error) if (error)
goto err; goto err;
...@@ -894,8 +895,8 @@ xrep_agi_calc_from_btrees( ...@@ -894,8 +895,8 @@ xrep_agi_calc_from_btrees(
if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) { if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
xfs_agblock_t blocks; xfs_agblock_t blocks;
cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp,
sc->sa.pag, XFS_BTNUM_FINO); XFS_BTNUM_FINO);
error = xfs_btree_count_blocks(cur, &blocks); error = xfs_btree_count_blocks(cur, &blocks);
if (error) if (error)
goto err; goto err;
...@@ -929,7 +930,7 @@ xrep_agi_commit_new( ...@@ -929,7 +930,7 @@ xrep_agi_commit_new(
pag = sc->sa.pag; pag = sc->sa.pag;
pag->pagi_count = be32_to_cpu(agi->agi_count); pag->pagi_count = be32_to_cpu(agi->agi_count);
pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
pag->pagi_init = 1; set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
return 0; return 0;
} }
...@@ -994,7 +995,7 @@ xrep_agi( ...@@ -994,7 +995,7 @@ xrep_agi(
out_revert: out_revert:
/* Mark the incore AGI state stale and revert the AGI. */ /* Mark the incore AGI state stale and revert the AGI. */
sc->sa.pag->pagi_init = 0; clear_bit(XFS_AGSTATE_AGI_INIT, &sc->sa.pag->pag_opstate);
memcpy(agi, &old_agi, sizeof(old_agi)); memcpy(agi, &old_agi, sizeof(old_agi));
return error; return error;
} }
...@@ -662,7 +662,7 @@ xchk_bmap_check_rmaps( ...@@ -662,7 +662,7 @@ xchk_bmap_check_rmaps(
error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag); error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
if (error || if (error ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) { (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
xfs_perag_put(pag); xfs_perag_rele(pag);
return error; return error;
} }
} }
......
...@@ -478,15 +478,15 @@ xchk_ag_btcur_init( ...@@ -478,15 +478,15 @@ xchk_ag_btcur_init(
/* Set up a inobt cursor for cross-referencing. */ /* Set up a inobt cursor for cross-referencing. */
if (sa->agi_bp && if (sa->agi_bp &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) { xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp, sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp, sa->agi_bp,
sa->pag, XFS_BTNUM_INO); XFS_BTNUM_INO);
} }
/* Set up a finobt cursor for cross-referencing. */ /* Set up a finobt cursor for cross-referencing. */
if (sa->agi_bp && xfs_has_finobt(mp) && if (sa->agi_bp && xfs_has_finobt(mp) &&
xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) { xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp, sa->fino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp, sa->agi_bp,
sa->pag, XFS_BTNUM_FINO); XFS_BTNUM_FINO);
} }
/* Set up a rmapbt cursor for cross-referencing. */ /* Set up a rmapbt cursor for cross-referencing. */
...@@ -636,6 +636,7 @@ xchk_get_inode( ...@@ -636,6 +636,7 @@ xchk_get_inode(
{ {
struct xfs_imap imap; struct xfs_imap imap;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_perag *pag;
struct xfs_inode *ip_in = XFS_I(file_inode(sc->file)); struct xfs_inode *ip_in = XFS_I(file_inode(sc->file));
struct xfs_inode *ip = NULL; struct xfs_inode *ip = NULL;
int error; int error;
...@@ -671,10 +672,14 @@ xchk_get_inode( ...@@ -671,10 +672,14 @@ xchk_get_inode(
* Otherwise, we really couldn't find it so tell userspace * Otherwise, we really couldn't find it so tell userspace
* that it no longer exists. * that it no longer exists.
*/ */
error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap, pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
if (pag) {
error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE); XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
xfs_perag_put(pag);
if (error) if (error)
return -ENOENT; return -ENOENT;
}
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
fallthrough; fallthrough;
default: default:
......
...@@ -86,7 +86,8 @@ xchk_fscount_warmup( ...@@ -86,7 +86,8 @@ xchk_fscount_warmup(
for_each_perag(mp, agno, pag) { for_each_perag(mp, agno, pag) {
if (xchk_should_terminate(sc, &error)) if (xchk_should_terminate(sc, &error))
break; break;
if (pag->pagi_init && pag->pagf_init) if (xfs_perag_initialised_agi(pag) &&
xfs_perag_initialised_agf(pag))
continue; continue;
/* Lock both AG headers. */ /* Lock both AG headers. */
...@@ -101,7 +102,8 @@ xchk_fscount_warmup( ...@@ -101,7 +102,8 @@ xchk_fscount_warmup(
* These are supposed to be initialized by the header read * These are supposed to be initialized by the header read
* function. * function.
*/ */
if (!pag->pagi_init || !pag->pagf_init) { if (!xfs_perag_initialised_agi(pag) ||
!xfs_perag_initialised_agf(pag)) {
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
break; break;
} }
...@@ -117,7 +119,7 @@ xchk_fscount_warmup( ...@@ -117,7 +119,7 @@ xchk_fscount_warmup(
if (agi_bp) if (agi_bp)
xfs_buf_relse(agi_bp); xfs_buf_relse(agi_bp);
if (pag) if (pag)
xfs_perag_put(pag); xfs_perag_rele(pag);
return error; return error;
} }
...@@ -220,7 +222,8 @@ xchk_fscount_aggregate_agcounts( ...@@ -220,7 +222,8 @@ xchk_fscount_aggregate_agcounts(
break; break;
/* This somehow got unset since the warmup? */ /* This somehow got unset since the warmup? */
if (!pag->pagi_init || !pag->pagf_init) { if (!xfs_perag_initialised_agi(pag) ||
!xfs_perag_initialised_agf(pag)) {
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
break; break;
} }
...@@ -249,7 +252,7 @@ xchk_fscount_aggregate_agcounts( ...@@ -249,7 +252,7 @@ xchk_fscount_aggregate_agcounts(
} }
if (pag) if (pag)
xfs_perag_put(pag); xfs_perag_rele(pag);
if (error) { if (error) {
xchk_set_incomplete(sc); xchk_set_incomplete(sc);
return error; return error;
......
...@@ -206,7 +206,7 @@ xrep_calc_ag_resblks( ...@@ -206,7 +206,7 @@ xrep_calc_ag_resblks(
return 0; return 0;
pag = xfs_perag_get(mp, sm->sm_agno); pag = xfs_perag_get(mp, sm->sm_agno);
if (pag->pagi_init) { if (xfs_perag_initialised_agi(pag)) {
/* Use in-core icount if possible. */ /* Use in-core icount if possible. */
icount = pag->pagi_count; icount = pag->pagi_count;
} else { } else {
...@@ -326,15 +326,14 @@ xrep_alloc_ag_block( ...@@ -326,15 +326,14 @@ xrep_alloc_ag_block(
args.tp = sc->tp; args.tp = sc->tp;
args.mp = sc->mp; args.mp = sc->mp;
args.pag = sc->sa.pag;
args.oinfo = *oinfo; args.oinfo = *oinfo;
args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.pag->pag_agno, 0);
args.minlen = 1; args.minlen = 1;
args.maxlen = 1; args.maxlen = 1;
args.prod = 1; args.prod = 1;
args.type = XFS_ALLOCTYPE_THIS_AG;
args.resv = resv; args.resv = resv;
error = xfs_alloc_vextent(&args); error = xfs_alloc_vextent_this_ag(&args, sc->sa.pag->pag_agno);
if (error) if (error)
return error; return error;
if (args.fsbno == NULLFSBLOCK) if (args.fsbno == NULLFSBLOCK)
......
...@@ -1410,7 +1410,7 @@ xfs_swap_extent_rmap( ...@@ -1410,7 +1410,7 @@ xfs_swap_extent_rmap(
/* Unmap the old blocks in the source file. */ /* Unmap the old blocks in the source file. */
while (tirec.br_blockcount) { while (tirec.br_blockcount) {
ASSERT(tp->t_firstblock == NULLFSBLOCK); ASSERT(tp->t_highest_agno == NULLAGNUMBER);
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec); trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
/* Read extent from the source file */ /* Read extent from the source file */
......
...@@ -21,23 +21,20 @@ ...@@ -21,23 +21,20 @@
STATIC int STATIC int
xfs_trim_extents( xfs_trim_extents(
struct xfs_mount *mp, struct xfs_perag *pag,
xfs_agnumber_t agno,
xfs_daddr_t start, xfs_daddr_t start,
xfs_daddr_t end, xfs_daddr_t end,
xfs_daddr_t minlen, xfs_daddr_t minlen,
uint64_t *blocks_trimmed) uint64_t *blocks_trimmed)
{ {
struct xfs_mount *mp = pag->pag_mount;
struct block_device *bdev = mp->m_ddev_targp->bt_bdev; struct block_device *bdev = mp->m_ddev_targp->bt_bdev;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
struct xfs_buf *agbp; struct xfs_buf *agbp;
struct xfs_agf *agf; struct xfs_agf *agf;
struct xfs_perag *pag;
int error; int error;
int i; int i;
pag = xfs_perag_get(mp, agno);
/* /*
* Force out the log. This means any transactions that might have freed * Force out the log. This means any transactions that might have freed
* space before we take the AGF buffer lock are now on disk, and the * space before we take the AGF buffer lock are now on disk, and the
...@@ -47,7 +44,7 @@ xfs_trim_extents( ...@@ -47,7 +44,7 @@ xfs_trim_extents(
error = xfs_alloc_read_agf(pag, NULL, 0, &agbp); error = xfs_alloc_read_agf(pag, NULL, 0, &agbp);
if (error) if (error)
goto out_put_perag; return error;
agf = agbp->b_addr; agf = agbp->b_addr;
cur = xfs_allocbt_init_cursor(mp, NULL, agbp, pag, XFS_BTNUM_CNT); cur = xfs_allocbt_init_cursor(mp, NULL, agbp, pag, XFS_BTNUM_CNT);
...@@ -71,10 +68,10 @@ xfs_trim_extents( ...@@ -71,10 +68,10 @@ xfs_trim_extents(
error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
if (error) if (error)
goto out_del_cursor; break;
if (XFS_IS_CORRUPT(mp, i != 1)) { if (XFS_IS_CORRUPT(mp, i != 1)) {
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
goto out_del_cursor; break;
} }
ASSERT(flen <= be32_to_cpu(agf->agf_longest)); ASSERT(flen <= be32_to_cpu(agf->agf_longest));
...@@ -83,15 +80,15 @@ xfs_trim_extents( ...@@ -83,15 +80,15 @@ xfs_trim_extents(
* the format the range/len variables are supplied in by * the format the range/len variables are supplied in by
* userspace. * userspace.
*/ */
dbno = XFS_AGB_TO_DADDR(mp, agno, fbno); dbno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, fbno);
dlen = XFS_FSB_TO_BB(mp, flen); dlen = XFS_FSB_TO_BB(mp, flen);
/* /*
* Too small? Give up. * Too small? Give up.
*/ */
if (dlen < minlen) { if (dlen < minlen) {
trace_xfs_discard_toosmall(mp, agno, fbno, flen); trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
goto out_del_cursor; break;
} }
/* /*
...@@ -100,7 +97,7 @@ xfs_trim_extents( ...@@ -100,7 +97,7 @@ xfs_trim_extents(
* down partially overlapping ranges for now. * down partially overlapping ranges for now.
*/ */
if (dbno + dlen < start || dbno > end) { if (dbno + dlen < start || dbno > end) {
trace_xfs_discard_exclude(mp, agno, fbno, flen); trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
goto next_extent; goto next_extent;
} }
...@@ -109,32 +106,30 @@ xfs_trim_extents( ...@@ -109,32 +106,30 @@ xfs_trim_extents(
* discard and try again the next time. * discard and try again the next time.
*/ */
if (xfs_extent_busy_search(mp, pag, fbno, flen)) { if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
trace_xfs_discard_busy(mp, agno, fbno, flen); trace_xfs_discard_busy(mp, pag->pag_agno, fbno, flen);
goto next_extent; goto next_extent;
} }
trace_xfs_discard_extent(mp, agno, fbno, flen); trace_xfs_discard_extent(mp, pag->pag_agno, fbno, flen);
error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS); error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS);
if (error) if (error)
goto out_del_cursor; break;
*blocks_trimmed += flen; *blocks_trimmed += flen;
next_extent: next_extent:
error = xfs_btree_decrement(cur, 0, &i); error = xfs_btree_decrement(cur, 0, &i);
if (error) if (error)
goto out_del_cursor; break;
if (fatal_signal_pending(current)) { if (fatal_signal_pending(current)) {
error = -ERESTARTSYS; error = -ERESTARTSYS;
goto out_del_cursor; break;
} }
} }
out_del_cursor: out_del_cursor:
xfs_btree_del_cursor(cur, error); xfs_btree_del_cursor(cur, error);
xfs_buf_relse(agbp); xfs_buf_relse(agbp);
out_put_perag:
xfs_perag_put(pag);
return error; return error;
} }
...@@ -152,11 +147,12 @@ xfs_ioc_trim( ...@@ -152,11 +147,12 @@ xfs_ioc_trim(
struct xfs_mount *mp, struct xfs_mount *mp,
struct fstrim_range __user *urange) struct fstrim_range __user *urange)
{ {
struct xfs_perag *pag;
unsigned int granularity = unsigned int granularity =
bdev_discard_granularity(mp->m_ddev_targp->bt_bdev); bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
struct fstrim_range range; struct fstrim_range range;
xfs_daddr_t start, end, minlen; xfs_daddr_t start, end, minlen;
xfs_agnumber_t start_agno, end_agno, agno; xfs_agnumber_t agno;
uint64_t blocks_trimmed = 0; uint64_t blocks_trimmed = 0;
int error, last_error = 0; int error, last_error = 0;
...@@ -193,20 +189,20 @@ xfs_ioc_trim( ...@@ -193,20 +189,20 @@ xfs_ioc_trim(
end = start + BTOBBT(range.len) - 1; end = start + BTOBBT(range.len) - 1;
if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1) if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1; end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1;
start_agno = xfs_daddr_to_agno(mp, start); agno = xfs_daddr_to_agno(mp, start);
end_agno = xfs_daddr_to_agno(mp, end); for_each_perag_range(mp, agno, xfs_daddr_to_agno(mp, end), pag) {
error = xfs_trim_extents(pag, start, end, minlen,
for (agno = start_agno; agno <= end_agno; agno++) {
error = xfs_trim_extents(mp, agno, start, end, minlen,
&blocks_trimmed); &blocks_trimmed);
if (error) { if (error) {
last_error = error; last_error = error;
if (error == -ERESTARTSYS) if (error == -ERESTARTSYS) {
xfs_perag_rele(pag);
break; break;
} }
} }
}
if (last_error) if (last_error)
return last_error; return last_error;
......
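
Editor's note: for readers who have not followed the perag conversion, the hunk above changes xfs_ioc_trim() from a bare AG-number loop into a walk over referenced per-AG structures, and xfs_trim_extents() now receives the perag instead of looking it up and putting it itself. The walker hands the loop body a group with an active reference held and drops that reference before moving to the next group, so any early break (here, the -ERESTARTSYS case) has to release the reference by hand. Below is a minimal userspace model of that walk-and-release pattern; it is a sketch, not kernel code, and the names perag_grab/perag_rele/for_each_ag_range merely stand in for xfs_perag_grab/xfs_perag_rele/for_each_perag_range.

#include <stdatomic.h>
#include <stdio.h>

#define NR_AGS	4

struct perag {
	unsigned int	agno;
	atomic_int	active_ref;	/* models pag_active_ref */
};

static struct perag ags[NR_AGS];

static struct perag *perag_grab(unsigned int agno)
{
	if (agno >= NR_AGS)
		return NULL;
	atomic_fetch_add(&ags[agno].active_ref, 1);
	return &ags[agno];
}

static void perag_rele(struct perag *pag)
{
	atomic_fetch_sub(&pag->active_ref, 1);
}

/* Release the current group, grab the next one if it is still in range. */
static struct perag *perag_next_range(struct perag *pag, unsigned int end_agno)
{
	unsigned int next = pag->agno + 1;

	perag_rele(pag);
	if (next > end_agno)
		return NULL;
	return perag_grab(next);
}

/* Rough stand-in for for_each_perag_range(): body runs with an active ref. */
#define for_each_ag_range(agno, end_agno, pag)			\
	for ((pag) = perag_grab(agno);				\
	     (pag) != NULL;					\
	     (pag) = perag_next_range((pag), (end_agno)))

int main(void)
{
	struct perag *pag;

	for (unsigned int i = 0; i < NR_AGS; i++)
		ags[i].agno = i;

	for_each_ag_range(1, 3, pag) {
		printf("trim ag %u, active refs %d\n", pag->agno,
		       atomic_load(&pag->active_ref));
		if (pag->agno == 2) {		/* e.g. a fatal signal */
			perag_rele(pag);	/* drop the ref before break */
			break;
		}
	}
	return 0;
}

The natural loop exit releases the last reference inside perag_next_range(); only the break path needs the explicit release, which is exactly why the new xfs_ioc_trim loop calls xfs_perag_rele() before breaking on -ERESTARTSYS.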
...@@ -9,13 +9,13 @@ ...@@ -9,13 +9,13 @@
struct xfs_mount; struct xfs_mount;
struct xfs_inode; struct xfs_inode;
struct xfs_bmalloca; struct xfs_bmalloca;
struct xfs_alloc_arg;
int xfs_filestream_mount(struct xfs_mount *mp); int xfs_filestream_mount(struct xfs_mount *mp);
void xfs_filestream_unmount(struct xfs_mount *mp); void xfs_filestream_unmount(struct xfs_mount *mp);
void xfs_filestream_deassociate(struct xfs_inode *ip); void xfs_filestream_deassociate(struct xfs_inode *ip);
xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip); int xfs_filestream_select_ag(struct xfs_bmalloca *ap,
int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp); struct xfs_alloc_arg *args, xfs_extlen_t *blen);
int xfs_filestream_peek_ag(struct xfs_mount *mp, xfs_agnumber_t agno);
static inline int static inline int
xfs_inode_is_filestream( xfs_inode_is_filestream(
......
...@@ -688,11 +688,11 @@ __xfs_getfsmap_datadev( ...@@ -688,11 +688,11 @@ __xfs_getfsmap_datadev(
info->agf_bp = NULL; info->agf_bp = NULL;
} }
if (info->pag) { if (info->pag) {
xfs_perag_put(info->pag); xfs_perag_rele(info->pag);
info->pag = NULL; info->pag = NULL;
} else if (pag) { } else if (pag) {
/* loop termination case */ /* loop termination case */
xfs_perag_put(pag); xfs_perag_rele(pag);
} }
return error; return error;
...@@ -761,6 +761,7 @@ xfs_getfsmap_datadev_bnobt( ...@@ -761,6 +761,7 @@ xfs_getfsmap_datadev_bnobt(
{ {
struct xfs_alloc_rec_incore akeys[2]; struct xfs_alloc_rec_incore akeys[2];
memset(akeys, 0, sizeof(akeys));
info->missing_owner = XFS_FMR_OWN_UNKNOWN; info->missing_owner = XFS_FMR_OWN_UNKNOWN;
return __xfs_getfsmap_datadev(tp, keys, info, return __xfs_getfsmap_datadev(tp, keys, info,
xfs_getfsmap_datadev_bnobt_query, &akeys[0]); xfs_getfsmap_datadev_bnobt_query, &akeys[0]);
......
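
Editor's note: the memset() added to xfs_getfsmap_datadev_bnobt() above is the fix for the uninitialized-variable access mentioned in the merge summary. akeys is a local array, so any field the caller does not set explicitly reaches __xfs_getfsmap_datadev() as stack garbage. A standalone C illustration of the same pitfall follows; the struct and function names here are invented for the example.

#include <stdio.h>
#include <string.h>

struct rec_key {
	unsigned int	startblock;
	unsigned int	blockcount;	/* a lower layer reads this field */
};

static void query(const struct rec_key keys[2])
{
	/* Trusts every field of both keys, set or not. */
	printf("low key len %u, high key len %u\n",
	       keys[0].blockcount, keys[1].blockcount);
}

int main(void)
{
	struct rec_key akeys[2];	/* indeterminate contents */

	memset(akeys, 0, sizeof(akeys));/* the added fix: zero before use */
	akeys[0].startblock = 16;	/* then set only what we care about */
	query(akeys);
	return 0;
}

An empty aggregate initializer would zero the array just as well; the hunk uses memset(), which keeps the declaration and the zeroing visibly separate.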
...@@ -255,7 +255,7 @@ xfs_perag_set_inode_tag( ...@@ -255,7 +255,7 @@ xfs_perag_set_inode_tag(
break; break;
} }
trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
} }
/* Clear a tag on both the AG incore inode tree and the AG radix tree. */ /* Clear a tag on both the AG incore inode tree and the AG radix tree. */
...@@ -289,7 +289,7 @@ xfs_perag_clear_inode_tag( ...@@ -289,7 +289,7 @@ xfs_perag_clear_inode_tag(
radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag); radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
spin_unlock(&mp->m_perag_lock); spin_unlock(&mp->m_perag_lock);
trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
} }
/* /*
...@@ -586,7 +586,7 @@ xfs_iget_cache_miss( ...@@ -586,7 +586,7 @@ xfs_iget_cache_miss(
if (!ip) if (!ip)
return -ENOMEM; return -ENOMEM;
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags); error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
if (error) if (error)
goto out_destroy; goto out_destroy;
...@@ -1767,7 +1767,7 @@ xfs_icwalk( ...@@ -1767,7 +1767,7 @@ xfs_icwalk(
if (error) { if (error) {
last_error = error; last_error = error;
if (error == -EFSCORRUPTED) { if (error == -EFSCORRUPTED) {
xfs_perag_put(pag); xfs_perag_rele(pag);
break; break;
} }
} }
......
...@@ -1367,7 +1367,7 @@ xfs_itruncate_extents_flags( ...@@ -1367,7 +1367,7 @@ xfs_itruncate_extents_flags(
unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1; unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
while (unmap_len > 0) { while (unmap_len > 0) {
ASSERT(tp->t_firstblock == NULLFSBLOCK); ASSERT(tp->t_highest_agno == NULLAGNUMBER);
error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len, error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
flags, XFS_ITRUNC_MAX_EXTENTS); flags, XFS_ITRUNC_MAX_EXTENTS);
if (error) if (error)
......
...@@ -275,7 +275,7 @@ xfs_iwalk_ag_start( ...@@ -275,7 +275,7 @@ xfs_iwalk_ag_start(
/* Set up a fresh cursor and empty the inobt cache. */ /* Set up a fresh cursor and empty the inobt cache. */
iwag->nr_recs = 0; iwag->nr_recs = 0;
error = xfs_inobt_cur(mp, tp, pag, XFS_BTNUM_INO, curpp, agi_bpp); error = xfs_inobt_cur(pag, tp, XFS_BTNUM_INO, curpp, agi_bpp);
if (error) if (error)
return error; return error;
...@@ -390,7 +390,7 @@ xfs_iwalk_run_callbacks( ...@@ -390,7 +390,7 @@ xfs_iwalk_run_callbacks(
} }
/* ...and recreate the cursor just past where we left off. */ /* ...and recreate the cursor just past where we left off. */
error = xfs_inobt_cur(mp, iwag->tp, iwag->pag, XFS_BTNUM_INO, curpp, error = xfs_inobt_cur(iwag->pag, iwag->tp, XFS_BTNUM_INO, curpp,
agi_bpp); agi_bpp);
if (error) if (error)
return error; return error;
...@@ -591,7 +591,7 @@ xfs_iwalk( ...@@ -591,7 +591,7 @@ xfs_iwalk(
} }
if (iwag.pag) if (iwag.pag)
xfs_perag_put(pag); xfs_perag_rele(pag);
xfs_iwalk_free(&iwag); xfs_iwalk_free(&iwag);
return error; return error;
} }
...@@ -683,7 +683,7 @@ xfs_iwalk_threaded( ...@@ -683,7 +683,7 @@ xfs_iwalk_threaded(
break; break;
} }
if (pag) if (pag)
xfs_perag_put(pag); xfs_perag_rele(pag);
if (polled) if (polled)
xfs_pwork_poll(&pctl); xfs_pwork_poll(&pctl);
return xfs_pwork_destroy(&pctl); return xfs_pwork_destroy(&pctl);
...@@ -776,7 +776,7 @@ xfs_inobt_walk( ...@@ -776,7 +776,7 @@ xfs_inobt_walk(
} }
if (iwag.pag) if (iwag.pag)
xfs_perag_put(pag); xfs_perag_rele(pag);
xfs_iwalk_free(&iwag); xfs_iwalk_free(&iwag);
return error; return error;
} }
...@@ -210,8 +210,7 @@ typedef struct xfs_mount { ...@@ -210,8 +210,7 @@ typedef struct xfs_mount {
struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX]; struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
struct xstats m_stats; /* per-fs stats */ struct xstats m_stats; /* per-fs stats */
xfs_agnumber_t m_agfrotor; /* last ag where space found */ xfs_agnumber_t m_agfrotor; /* last ag where space found */
xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ atomic_t m_agirotor; /* last ag dir inode alloced */
spinlock_t m_agirotor_lock;/* .. and lock protecting it */
/* Memory shrinker to throttle and reprioritize inodegc */ /* Memory shrinker to throttle and reprioritize inodegc */
struct shrinker m_inodegc_shrinker; struct shrinker m_inodegc_shrinker;
......
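
Editor's note: the xfs_mount hunk above replaces the spinlock-protected m_agirotor with an atomic_t (and the spin_lock_init() for m_agirotor_lock disappears further down, in xfs_init_fs_context). The rotor only has to hand out a starting AG for the next inode-chunk search, so a single atomic read-modify-write is enough. A minimal userspace model with C11 atomics is below; the helper name and the placement of the modulo are illustrative assumptions, not a claim about the exact kernel arithmetic.

#include <stdatomic.h>
#include <stdio.h>

#define AG_COUNT 8

/* Rotor as a free-running atomic counter; no dedicated spinlock needed. */
static atomic_uint agirotor;

static unsigned int next_start_ag(void)
{
	/* One atomic RMW per caller; wrap into the valid AG range here. */
	return atomic_fetch_add(&agirotor, 1) % AG_COUNT;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("start inode allocation scan at ag %u\n",
		       next_start_ag());
	return 0;
}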
...@@ -610,7 +610,7 @@ xfs_reflink_cancel_cow_blocks( ...@@ -610,7 +610,7 @@ xfs_reflink_cancel_cow_blocks(
if (error) if (error)
break; break;
} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) { } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
ASSERT((*tpp)->t_firstblock == NULLFSBLOCK); ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
/* Free the CoW orphan record. */ /* Free the CoW orphan record. */
xfs_refcount_free_cow_extent(*tpp, del.br_startblock, xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
...@@ -927,7 +927,7 @@ xfs_reflink_recover_cow( ...@@ -927,7 +927,7 @@ xfs_reflink_recover_cow(
for_each_perag(mp, agno, pag) { for_each_perag(mp, agno, pag) {
error = xfs_refcount_recover_cow_leftovers(mp, pag); error = xfs_refcount_recover_cow_leftovers(mp, pag);
if (error) { if (error) {
xfs_perag_put(pag); xfs_perag_rele(pag);
break; break;
} }
} }
......
...@@ -247,6 +247,32 @@ xfs_fs_show_options( ...@@ -247,6 +247,32 @@ xfs_fs_show_options(
return 0; return 0;
} }
static bool
xfs_set_inode_alloc_perag(
struct xfs_perag *pag,
xfs_ino_t ino,
xfs_agnumber_t max_metadata)
{
if (!xfs_is_inode32(pag->pag_mount)) {
set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
return false;
}
if (ino > XFS_MAXINUMBER_32) {
clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
return false;
}
set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
if (pag->pag_agno < max_metadata)
set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
else
clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
return true;
}
/* /*
* Set parameters for inode allocation heuristics, taking into account * Set parameters for inode allocation heuristics, taking into account
* filesystem size and inode32/inode64 mount options; i.e. specifically * filesystem size and inode32/inode64 mount options; i.e. specifically
...@@ -310,24 +336,8 @@ xfs_set_inode_alloc( ...@@ -310,24 +336,8 @@ xfs_set_inode_alloc(
ino = XFS_AGINO_TO_INO(mp, index, agino); ino = XFS_AGINO_TO_INO(mp, index, agino);
pag = xfs_perag_get(mp, index); pag = xfs_perag_get(mp, index);
if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
if (xfs_is_inode32(mp)) {
if (ino > XFS_MAXINUMBER_32) {
pag->pagi_inodeok = 0;
pag->pagf_metadata = 0;
} else {
pag->pagi_inodeok = 1;
maxagi++; maxagi++;
if (index < max_metadata)
pag->pagf_metadata = 1;
else
pag->pagf_metadata = 0;
}
} else {
pag->pagi_inodeok = 1;
pag->pagf_metadata = 0;
}
xfs_perag_put(pag); xfs_perag_put(pag);
} }
...@@ -1922,7 +1932,6 @@ static int xfs_init_fs_context( ...@@ -1922,7 +1932,6 @@ static int xfs_init_fs_context(
return -ENOMEM; return -ENOMEM;
spin_lock_init(&mp->m_sb_lock); spin_lock_init(&mp->m_sb_lock);
spin_lock_init(&mp->m_agirotor_lock);
INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
spin_lock_init(&mp->m_perag_lock); spin_lock_init(&mp->m_perag_lock);
mutex_init(&mp->m_growlock); mutex_init(&mp->m_growlock);
......
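
Editor's note: the open-coded pagi_inodeok/pagf_metadata updates in xfs_set_inode_alloc() are funneled through the new xfs_set_inode_alloc_perag() helper above, which records the decisions as per-AG opstate bits and tells the caller whether the AG counts towards maxagi. The userspace restatement below walks the same decision tree; the flag macros and the set_inode_alloc_ag() helper are invented for the example, whereas the kernel uses set_bit()/clear_bit() on pag->pag_opstate.

#include <stdbool.h>
#include <stdio.h>

#define AG_ALLOWS_INODES	(1u << 0)
#define AG_PREFERS_METADATA	(1u << 1)

struct ag {
	unsigned int	agno;
	unsigned int	opstate;
};

/* Returns true only when the AG counts towards maxagi in the caller. */
static bool set_inode_alloc_ag(struct ag *ag, bool inode32,
			       unsigned long long highest_ino,
			       unsigned long long max_ino_32,
			       unsigned int max_metadata)
{
	if (!inode32) {
		ag->opstate = AG_ALLOWS_INODES;	/* inode64: every AG is ok */
		return false;
	}
	if (highest_ino > max_ino_32) {
		ag->opstate = 0;		/* inodes here would not fit */
		return false;
	}
	ag->opstate = AG_ALLOWS_INODES;
	if (ag->agno < max_metadata)
		ag->opstate |= AG_PREFERS_METADATA;
	return true;
}

int main(void)
{
	struct ag ag = { .agno = 2 };
	unsigned int maxagi = 0;

	if (set_inode_alloc_ag(&ag, true, 1ULL << 31, 0xffffffffULL, 4))
		maxagi++;
	printf("opstate 0x%x maxagi %u\n", ag.opstate, maxagi);
	return 0;
}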
...@@ -74,6 +74,7 @@ struct xfs_inobt_rec_incore; ...@@ -74,6 +74,7 @@ struct xfs_inobt_rec_incore;
union xfs_btree_ptr; union xfs_btree_ptr;
struct xfs_dqtrx; struct xfs_dqtrx;
struct xfs_icwalk; struct xfs_icwalk;
struct xfs_perag;
#define XFS_ATTR_FILTER_FLAGS \ #define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \ { XFS_ATTR_ROOT, "ROOT" }, \
...@@ -159,36 +160,40 @@ TRACE_EVENT(xlog_intent_recovery_failed, ...@@ -159,36 +160,40 @@ TRACE_EVENT(xlog_intent_recovery_failed,
); );
DECLARE_EVENT_CLASS(xfs_perag_class, DECLARE_EVENT_CLASS(xfs_perag_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip),
unsigned long caller_ip), TP_ARGS(pag, caller_ip),
TP_ARGS(mp, agno, refcount, caller_ip),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(xfs_agnumber_t, agno) __field(xfs_agnumber_t, agno)
__field(int, refcount) __field(int, refcount)
__field(int, active_refcount)
__field(unsigned long, caller_ip) __field(unsigned long, caller_ip)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = mp->m_super->s_dev; __entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = agno; __entry->agno = pag->pag_agno;
__entry->refcount = refcount; __entry->refcount = atomic_read(&pag->pag_ref);
__entry->active_refcount = atomic_read(&pag->pag_active_ref);
__entry->caller_ip = caller_ip; __entry->caller_ip = caller_ip;
), ),
TP_printk("dev %d:%d agno 0x%x refcount %d caller %pS", TP_printk("dev %d:%d agno 0x%x passive refs %d active refs %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno, __entry->agno,
__entry->refcount, __entry->refcount,
__entry->active_refcount,
(char *)__entry->caller_ip) (char *)__entry->caller_ip)
); );
#define DEFINE_PERAG_REF_EVENT(name) \ #define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \ DEFINE_EVENT(xfs_perag_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \ TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip), \
unsigned long caller_ip), \ TP_ARGS(pag, caller_ip))
TP_ARGS(mp, agno, refcount, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get); DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag); DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put); DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_grab);
DEFINE_PERAG_REF_EVENT(xfs_perag_grab_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_rele);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag); DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag); DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
...@@ -634,8 +639,8 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); ...@@ -634,8 +639,8 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
DECLARE_EVENT_CLASS(xfs_filestream_class, DECLARE_EVENT_CLASS(xfs_filestream_class,
TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno), TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino),
TP_ARGS(mp, ino, agno), TP_ARGS(pag, ino),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(xfs_ino_t, ino) __field(xfs_ino_t, ino)
...@@ -643,10 +648,10 @@ DECLARE_EVENT_CLASS(xfs_filestream_class, ...@@ -643,10 +648,10 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
__field(int, streams) __field(int, streams)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = mp->m_super->s_dev; __entry->dev = pag->pag_mount->m_super->s_dev;
__entry->ino = ino; __entry->ino = ino;
__entry->agno = agno; __entry->agno = pag->pag_agno;
__entry->streams = xfs_filestream_peek_ag(mp, agno); __entry->streams = atomic_read(&pag->pagf_fstrms);
), ),
TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d", TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d",
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
...@@ -656,39 +661,40 @@ DECLARE_EVENT_CLASS(xfs_filestream_class, ...@@ -656,39 +661,40 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
) )
#define DEFINE_FILESTREAM_EVENT(name) \ #define DEFINE_FILESTREAM_EVENT(name) \
DEFINE_EVENT(xfs_filestream_class, name, \ DEFINE_EVENT(xfs_filestream_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno), \ TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino), \
TP_ARGS(mp, ino, agno)) TP_ARGS(pag, ino))
DEFINE_FILESTREAM_EVENT(xfs_filestream_free); DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup); DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
DEFINE_FILESTREAM_EVENT(xfs_filestream_scan); DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);
TRACE_EVENT(xfs_filestream_pick, TRACE_EVENT(xfs_filestream_pick,
TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno, TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino, xfs_extlen_t free),
xfs_extlen_t free, int nscan), TP_ARGS(pag, ino, free),
TP_ARGS(ip, agno, free, nscan),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(xfs_ino_t, ino) __field(xfs_ino_t, ino)
__field(xfs_agnumber_t, agno) __field(xfs_agnumber_t, agno)
__field(int, streams) __field(int, streams)
__field(xfs_extlen_t, free) __field(xfs_extlen_t, free)
__field(int, nscan)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->dev = pag->pag_mount->m_super->s_dev;
__entry->ino = ip->i_ino; __entry->ino = ino;
__entry->agno = agno; if (pag) {
__entry->streams = xfs_filestream_peek_ag(ip->i_mount, agno); __entry->agno = pag->pag_agno;
__entry->streams = atomic_read(&pag->pagf_fstrms);
} else {
__entry->agno = NULLAGNUMBER;
__entry->streams = 0;
}
__entry->free = free; __entry->free = free;
__entry->nscan = nscan;
), ),
TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d free %d nscan %d", TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d free %d",
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino, __entry->ino,
__entry->agno, __entry->agno,
__entry->streams, __entry->streams,
__entry->free, __entry->free)
__entry->nscan)
); );
DECLARE_EVENT_CLASS(xfs_lock_class, DECLARE_EVENT_CLASS(xfs_lock_class,
...@@ -1795,13 +1801,11 @@ DECLARE_EVENT_CLASS(xfs_alloc_class, ...@@ -1795,13 +1801,11 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__field(xfs_extlen_t, alignment) __field(xfs_extlen_t, alignment)
__field(xfs_extlen_t, minalignslop) __field(xfs_extlen_t, minalignslop)
__field(xfs_extlen_t, len) __field(xfs_extlen_t, len)
__field(short, type)
__field(short, otype)
__field(char, wasdel) __field(char, wasdel)
__field(char, wasfromfl) __field(char, wasfromfl)
__field(int, resv) __field(int, resv)
__field(int, datatype) __field(int, datatype)
__field(xfs_fsblock_t, firstblock) __field(xfs_agnumber_t, highest_agno)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = args->mp->m_super->s_dev; __entry->dev = args->mp->m_super->s_dev;
...@@ -1816,18 +1820,16 @@ DECLARE_EVENT_CLASS(xfs_alloc_class, ...@@ -1816,18 +1820,16 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__entry->alignment = args->alignment; __entry->alignment = args->alignment;
__entry->minalignslop = args->minalignslop; __entry->minalignslop = args->minalignslop;
__entry->len = args->len; __entry->len = args->len;
__entry->type = args->type;
__entry->otype = args->otype;
__entry->wasdel = args->wasdel; __entry->wasdel = args->wasdel;
__entry->wasfromfl = args->wasfromfl; __entry->wasfromfl = args->wasfromfl;
__entry->resv = args->resv; __entry->resv = args->resv;
__entry->datatype = args->datatype; __entry->datatype = args->datatype;
__entry->firstblock = args->tp->t_firstblock; __entry->highest_agno = args->tp->t_highest_agno;
), ),
TP_printk("dev %d:%d agno 0x%x agbno 0x%x minlen %u maxlen %u mod %u " TP_printk("dev %d:%d agno 0x%x agbno 0x%x minlen %u maxlen %u mod %u "
"prod %u minleft %u total %u alignment %u minalignslop %u " "prod %u minleft %u total %u alignment %u minalignslop %u "
"len %u type %s otype %s wasdel %d wasfromfl %d resv %d " "len %u wasdel %d wasfromfl %d resv %d "
"datatype 0x%x firstblock 0x%llx", "datatype 0x%x highest_agno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno, __entry->agno,
__entry->agbno, __entry->agbno,
...@@ -1840,13 +1842,11 @@ DECLARE_EVENT_CLASS(xfs_alloc_class, ...@@ -1840,13 +1842,11 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__entry->alignment, __entry->alignment,
__entry->minalignslop, __entry->minalignslop,
__entry->len, __entry->len,
__print_symbolic(__entry->type, XFS_ALLOC_TYPES),
__print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
__entry->wasdel, __entry->wasdel,
__entry->wasfromfl, __entry->wasfromfl,
__entry->resv, __entry->resv,
__entry->datatype, __entry->datatype,
(unsigned long long)__entry->firstblock) __entry->highest_agno)
) )
#define DEFINE_ALLOC_EVENT(name) \ #define DEFINE_ALLOC_EVENT(name) \
...@@ -1877,6 +1877,7 @@ DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough); ...@@ -1877,6 +1877,7 @@ DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done); DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
DEFINE_ALLOC_EVENT(xfs_alloc_small_error); DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_skip_deadlock);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
......
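
Editor's note: the xfs_perag_class rework above is where the active/passive distinction from the merge summary becomes visible. The tracepoints now take the perag itself instead of a pre-read refcount, and they capture both the passive count (pag_ref, from xfs_perag_get/put) and the active count (pag_active_ref, from the new xfs_perag_grab/rele) at trace time. The userspace sketch below models that distinction; trace_perag() and the two helpers are stand-ins, with only the field names taken from the diff.

#include <stdatomic.h>
#include <stdio.h>

/* Passive refs keep the structure around; active refs pin it for work. */
struct perag {
	unsigned int	agno;
	atomic_int	pag_ref;	/* passive: xfs_perag_get/put  */
	atomic_int	pag_active_ref;	/* active:  xfs_perag_grab/rele */
};

/* Models the reworked trace class: both counts read from the pag itself. */
static void trace_perag(const char *event, struct perag *pag)
{
	printf("%s: agno 0x%x passive refs %d active refs %d\n",
	       event, pag->agno,
	       atomic_load(&pag->pag_ref),
	       atomic_load(&pag->pag_active_ref));
}

int main(void)
{
	static struct perag pag = { .agno = 0 };

	atomic_fetch_add(&pag.pag_ref, 1);	 /* xfs_perag_get()  */
	trace_perag("xfs_perag_get", &pag);
	atomic_fetch_add(&pag.pag_active_ref, 1);/* xfs_perag_grab() */
	trace_perag("xfs_perag_grab", &pag);
	atomic_fetch_sub(&pag.pag_active_ref, 1);/* xfs_perag_rele() */
	trace_perag("xfs_perag_rele", &pag);
	atomic_fetch_sub(&pag.pag_ref, 1);	 /* xfs_perag_put()  */
	return 0;
}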
...@@ -102,7 +102,7 @@ xfs_trans_dup( ...@@ -102,7 +102,7 @@ xfs_trans_dup(
INIT_LIST_HEAD(&ntp->t_items); INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy); INIT_LIST_HEAD(&ntp->t_busy);
INIT_LIST_HEAD(&ntp->t_dfops); INIT_LIST_HEAD(&ntp->t_dfops);
ntp->t_firstblock = NULLFSBLOCK; ntp->t_highest_agno = NULLAGNUMBER;
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(tp->t_ticket != NULL); ASSERT(tp->t_ticket != NULL);
...@@ -278,7 +278,7 @@ xfs_trans_alloc( ...@@ -278,7 +278,7 @@ xfs_trans_alloc(
INIT_LIST_HEAD(&tp->t_items); INIT_LIST_HEAD(&tp->t_items);
INIT_LIST_HEAD(&tp->t_busy); INIT_LIST_HEAD(&tp->t_busy);
INIT_LIST_HEAD(&tp->t_dfops); INIT_LIST_HEAD(&tp->t_dfops);
tp->t_firstblock = NULLFSBLOCK; tp->t_highest_agno = NULLAGNUMBER;
error = xfs_trans_reserve(tp, resp, blocks, rtextents); error = xfs_trans_reserve(tp, resp, blocks, rtextents);
if (error == -ENOSPC && want_retry) { if (error == -ENOSPC && want_retry) {
...@@ -1078,10 +1078,10 @@ xfs_trans_cancel( ...@@ -1078,10 +1078,10 @@ xfs_trans_cancel(
/* /*
* It's never valid to cancel a transaction with deferred ops attached, * It's never valid to cancel a transaction with deferred ops attached,
* because the transaction is effectively dirty. Complain about this * because the transaction is effectively dirty. Complain about this
* loudly before freeing the in-memory defer items. * loudly before freeing the in-memory defer items and shutting down the
* filesystem.
*/ */
if (!list_empty(&tp->t_dfops)) { if (!list_empty(&tp->t_dfops)) {
ASSERT(xfs_is_shutdown(mp) || list_empty(&tp->t_dfops));
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
dirty = true; dirty = true;
xfs_defer_cancel(tp); xfs_defer_cancel(tp);
......
...@@ -132,7 +132,7 @@ typedef struct xfs_trans { ...@@ -132,7 +132,7 @@ typedef struct xfs_trans {
unsigned int t_rtx_res; /* # of rt extents resvd */ unsigned int t_rtx_res; /* # of rt extents resvd */
unsigned int t_rtx_res_used; /* # of resvd rt extents used */ unsigned int t_rtx_res_used; /* # of resvd rt extents used */
unsigned int t_flags; /* misc flags */ unsigned int t_flags; /* misc flags */
xfs_fsblock_t t_firstblock; /* first block allocated */ xfs_agnumber_t t_highest_agno; /* highest AGF locked */
struct xlog_ticket *t_ticket; /* log mgr ticket */ struct xlog_ticket *t_ticket; /* log mgr ticket */
struct xfs_mount *t_mountp; /* ptr to fs mount struct */ struct xfs_mount *t_mountp; /* ptr to fs mount struct */
struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
......
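
Editor's note: renaming t_firstblock to t_highest_agno in the hunks above matches what the field is now used for: it records the highest AG whose AGF this transaction has locked, and later allocations in the same transaction must not drop below it, otherwise two transactions could lock AGFs in opposite orders and deadlock (hence the xfs_alloc_vextent_skip_deadlock trace point added in the trace changes). The userspace model below illustrates the ordering rule only; the helper names are invented, and in the kernel the check lives in the allocator's argument validation rather than in a lock function.

#include <stdbool.h>
#include <stdio.h>

#define NULLAGNUMBER	((unsigned int)-1)

struct trans {
	unsigned int	highest_agno;	/* models tp->t_highest_agno */
};

/*
 * AGF buffers must be locked in ascending AG order within a transaction.
 * Once an AG is locked, later allocations may only consider that AG or a
 * higher one; lower AGs are skipped so that two transactions walking in
 * opposite directions cannot deadlock on each other's AGF buffers.
 */
static bool can_lock_agf(const struct trans *tp, unsigned int agno)
{
	if (tp->highest_agno == NULLAGNUMBER)
		return true;
	return agno >= tp->highest_agno;
}

static void lock_agf(struct trans *tp, unsigned int agno)
{
	if (tp->highest_agno == NULLAGNUMBER || agno > tp->highest_agno)
		tp->highest_agno = agno;
}

int main(void)
{
	struct trans tp = { .highest_agno = NULLAGNUMBER };

	lock_agf(&tp, 3);
	printf("ag 5 allowed: %d\n", can_lock_agf(&tp, 5));	/* 1 */
	printf("ag 1 allowed: %d\n", can_lock_agf(&tp, 1));	/* 0: skipped */
	return 0;
}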