Commit a78ee256 authored by Dave Chinner, committed by Darrick J. Wong

xfs: convert XFS_AGFL_SIZE to a helper function

The AGFL size calculation is about to get more complex, so let's turn
the macro into a function first and remove the macro.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
[darrick: forward port to newer kernel, simplify the helper]
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 6231848c
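
For reference, the arithmetic the new helper encodes: every AGFL slot holds a 32-bit AG block number, and on v5 (CRC-enabled) filesystems the packed struct xfs_agfl header at the start of the sector is subtracted before counting slots. Below is a minimal standalone sketch of that calculation, not part of the patch; the 512-byte sector size and the 36-byte header size (4 + 4 + 16 + 8 + 4 for magic, seqno, uuid, lsn, crc) are illustrative assumptions.

/* Standalone sketch of the AGFL slot-count math (not kernel code). */
#include <stdint.h>
#include <stdio.h>

static unsigned int agfl_size(unsigned int sectsize, int has_crc)
{
	unsigned int size = sectsize;		/* sb_sectsize */

	if (has_crc)
		size -= 36;			/* assumed sizeof(struct xfs_agfl), packed */

	return size / sizeof(uint32_t);		/* xfs_agblock_t is 32 bits */
}

int main(void)
{
	printf("v4 (no CRC): %u slots\n", agfl_size(512, 0));	/* 128 */
	printf("v5 (CRC):    %u slots\n", agfl_size(512, 1));	/* 119 */
	return 0;
}

With these assumed numbers a v4 filesystem gets 128 free-list slots per AGFL block and a v5 filesystem gets 119; moving the calculation into a single out-of-line function lets it grow more complex without open-coding it at every call site.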
@@ -53,6 +53,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
 
+/*
+ * Size of the AGFL. For CRC-enabled filesystes we steal a couple of slots in
+ * the beginning of the block for a proper header with the location information
+ * and CRC.
+ */
+unsigned int
+xfs_agfl_size(
+	struct xfs_mount	*mp)
+{
+	unsigned int		size = mp->m_sb.sb_sectsize;
+
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		size -= sizeof(struct xfs_agfl);
+
+	return size / sizeof(xfs_agblock_t);
+}
+
 unsigned int
 xfs_refc_block(
 	struct xfs_mount	*mp)
@@ -550,7 +567,7 @@ xfs_agfl_verify(
 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
 		return __this_address;
 
-	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
+	for (i = 0; i < xfs_agfl_size(mp); i++) {
 		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
 		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
 			return __this_address;
@@ -2266,7 +2283,7 @@ xfs_alloc_get_freelist(
 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
 	be32_add_cpu(&agf->agf_flfirst, 1);
 	xfs_trans_brelse(tp, agflbp);
-	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
+	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
 		agf->agf_flfirst = 0;
 
 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
@@ -2377,7 +2394,7 @@ xfs_alloc_put_freelist(
 			be32_to_cpu(agf->agf_seqno), &agflbp)))
 		return error;
 	be32_add_cpu(&agf->agf_fllast, 1);
-	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
+	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
 		agf->agf_fllast = 0;
 
 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
@@ -2395,7 +2412,7 @@ xfs_alloc_put_freelist(
 	xfs_alloc_log_agf(tp, agbp, logflags);
 
-	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
+	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
 
 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
@@ -2428,9 +2445,9 @@ xfs_agf_verify(
 	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
 	      XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
 	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
-	      be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
-	      be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
-	      be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
+	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
+	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
+	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
 		return __this_address;
 
 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
......
@@ -26,6 +26,8 @@ struct xfs_trans;
 
 extern struct workqueue_struct *xfs_alloc_wq;
 
+unsigned int xfs_agfl_size(struct xfs_mount *mp);
+
 /*
  * Freespace allocation types. Argument to xfs_alloc_[v]extent.
  */
......
@@ -803,24 +803,13 @@ typedef struct xfs_agi {
 		&(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
 		(__be32 *)(bp)->b_addr)
 
-/*
- * Size of the AGFL. For CRC-enabled filesystes we steal a couple of
- * slots in the beginning of the block for a proper header with the
- * location information and CRC.
- */
-#define XFS_AGFL_SIZE(mp) \
-	(((mp)->m_sb.sb_sectsize - \
-	 (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
-		sizeof(struct xfs_agfl) : 0)) / \
-	 sizeof(xfs_agblock_t))
-
 typedef struct xfs_agfl {
 	__be32		agfl_magicnum;
 	__be32		agfl_seqno;
 	uuid_t		agfl_uuid;
 	__be64		agfl_lsn;
 	__be32		agfl_crc;
-	__be32		agfl_bno[];	/* actually XFS_AGFL_SIZE(mp) */
+	__be32		agfl_bno[];	/* actually xfs_agfl_size(mp) */
 } __attribute__((packed)) xfs_agfl_t;
 
 #define XFS_AGFL_CRC_OFF	offsetof(struct xfs_agfl, agfl_crc)
......
@@ -80,7 +80,7 @@ xfs_scrub_walk_agfl(
 	}
 
 	/* first to the end */
-	for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
+	for (i = flfirst; i < xfs_agfl_size(mp); i++) {
 		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
 		if (error)
 			return error;
@@ -664,7 +664,7 @@ xfs_scrub_agf(
 	if (agfl_last > agfl_first)
 		fl_count = agfl_last - agfl_first + 1;
 	else
-		fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
+		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
 	if (agfl_count != 0 && fl_count != agfl_count)
 		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
@@ -791,7 +791,7 @@ xfs_scrub_agfl(
 	/* Allocate buffer to ensure uniqueness of AGFL entries. */
 	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
 	agflcount = be32_to_cpu(agf->agf_flcount);
-	if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
+	if (agflcount > xfs_agfl_size(sc->mp)) {
 		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
 		goto out;
 	}
......
@@ -217,7 +217,7 @@ xfs_growfs_data_private(
 	}
 
 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
-	for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
+	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
 		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
 
 	error = xfs_bwrite(bp);
......