Commit 46f23adf authored by Ben Myers

Merge branch 'xfs-factor-icluster-macros' into for-next

parents ffda4e83 f9e5abcf
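The cleanup merged here drops the XFS_IALLOC_INODES(), XFS_IALLOC_BLOCKS() and XFS_INODE_CLUSTER_SIZE() wrapper macros in favour of the xfs_mount fields they aliased (m_ialloc_inos, m_ialloc_blks and m_inode_cluster_size), and factors the blocks-per-inode-cluster computation that was open-coded in several places into a new xfs_icluster_size_fsb() helper in the inode allocation header.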
@@ -52,7 +52,7 @@ xfs_ialloc_cluster_alignment(
 {
        if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
            args->mp->m_sb.sb_inoalignmt >=
-           XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp)))
+           XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size))
                return args->mp->m_sb.sb_inoalignmt;
        return 1;
 }
@@ -170,27 +170,20 @@ xfs_ialloc_inode_init(
 {
        struct xfs_buf          *fbuf;
        struct xfs_dinode       *free;
-       int                     blks_per_cluster, nbufs, ninodes;
+       int                     nbufs, blks_per_cluster, inodes_per_cluster;
        int                     version;
        int                     i, j;
        xfs_daddr_t             d;
        xfs_ino_t               ino = 0;

        /*
-        * Loop over the new block(s), filling in the inodes.
-        * For small block sizes, manipulate the inodes in buffers
-        * which are multiples of the blocks size.
+        * Loop over the new block(s), filling in the inodes.  For small block
+        * sizes, manipulate the inodes in buffers which are multiples of the
+        * blocks size.
         */
-       if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-               blks_per_cluster = 1;
-               nbufs = length;
-               ninodes = mp->m_sb.sb_inopblock;
-       } else {
-               blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-                                  mp->m_sb.sb_blocksize;
-               nbufs = length / blks_per_cluster;
-               ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-       }
+       blks_per_cluster = xfs_icluster_size_fsb(mp);
+       inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+       nbufs = length / blks_per_cluster;

        /*
         * Figure out what version number to use in the inodes we create.  If
@@ -225,7 +218,7 @@ xfs_ialloc_inode_init(
         * they track in the AIL as if they were physically logged.
         */
        if (tp)
-               xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp),
+               xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos,
                                mp->m_sb.sb_inodesize, length, gen);
        } else if (xfs_sb_version_hasnlink(&mp->m_sb))
                version = 2;
@@ -246,7 +239,7 @@ xfs_ialloc_inode_init(
                /* Initialize the inode buffers and log them appropriately. */
                fbuf->b_ops = &xfs_inode_buf_ops;
                xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
-               for (i = 0; i < ninodes; i++) {
+               for (i = 0; i < inodes_per_cluster; i++) {
                        int     ioffset = i << mp->m_sb.sb_inodelog;
                        uint    isize = xfs_dinode_size(version);
@@ -329,11 +322,11 @@ xfs_ialloc_ag_alloc(
         * Locking will ensure that we don't have two callers in here
         * at one time.
         */
-       newlen = XFS_IALLOC_INODES(args.mp);
+       newlen = args.mp->m_ialloc_inos;
        if (args.mp->m_maxicount &&
            args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
                return XFS_ERROR(ENOSPC);
-       args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
+       args.minlen = args.maxlen = args.mp->m_ialloc_blks;
        /*
         * First try to allocate inodes contiguous with the last-allocated
         * chunk of inodes.  If the filesystem is striped, this will fill
@@ -343,7 +336,7 @@ xfs_ialloc_ag_alloc(
        newino = be32_to_cpu(agi->agi_newino);
        agno = be32_to_cpu(agi->agi_seqno);
        args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
-                    XFS_IALLOC_BLOCKS(args.mp);
+                    args.mp->m_ialloc_blks;
        if (likely(newino != NULLAGINO &&
                  (args.agbno < be32_to_cpu(agi->agi_length)))) {
                args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
@@ -585,7 +578,7 @@ xfs_ialloc_ag_select(
                 * Is there enough free space for the file plus a block of
                 * inodes? (if we need to allocate some)?
                 */
-               ineed = XFS_IALLOC_BLOCKS(mp);
+               ineed = mp->m_ialloc_blks;
                longest = pag->pagf_longest;
                if (!longest)
                        longest = pag->pagf_flcount > 0;
@@ -999,7 +992,7 @@ xfs_dialloc(
         * inode.
         */
        if (mp->m_maxicount &&
-           mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
+           mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) {
                noroom = 1;
                okalloc = 0;
        }
@@ -1202,7 +1195,7 @@ xfs_difree(
         * When an inode cluster is free, it becomes eligible for removal
         */
        if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
-           (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
+           (rec.ir_freecount == mp->m_ialloc_inos)) {

                *delete = 1;
                *first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
@@ -1212,7 +1205,7 @@ xfs_difree(
                 * AGI and Superblock inode counts, and mark the disk space
                 * to be freed when the transaction is committed.
                 */
-               ilen = XFS_IALLOC_INODES(mp);
+               ilen = mp->m_ialloc_inos;
                be32_add_cpu(&agi->agi_count, -ilen);
                be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
                xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
@@ -1228,9 +1221,9 @@ xfs_difree(
                        goto error0;
                }

-               xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
-                               agno, XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)),
-                               XFS_IALLOC_BLOCKS(mp), flist, mp);
+               xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno,
+                                 XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)),
+                                 mp->m_ialloc_blks, flist, mp);
        } else {
                *delete = 0;
@@ -1311,7 +1304,7 @@ xfs_imap_lookup(
        /* check that the returned record contains the required inode */
        if (rec.ir_startino > agino ||
-           rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+           rec.ir_startino + mp->m_ialloc_inos <= agino)
                return EINVAL;

        /* for untrusted inodes check it is allocated first */
@@ -1384,7 +1377,7 @@ xfs_imap(
                return XFS_ERROR(EINVAL);
        }

-       blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+       blks_per_cluster = xfs_icluster_size_fsb(mp);

        /*
         * For bulkstat and handle lookups, we have an untrusted inode number
@@ -1405,7 +1398,7 @@ xfs_imap(
         * If the inode cluster size is the same as the blocksize or
         * smaller we get to the buffer by simple arithmetics.
         */
-       if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) {
+       if (blks_per_cluster == 1) {
                offset = XFS_INO_TO_OFFSET(mp, ino);
                ASSERT(offset < mp->m_sb.sb_inopblock);
...
@@ -25,17 +25,18 @@ struct xfs_mount;
 struct xfs_trans;
 struct xfs_btree_cur;

-/*
- * Allocation parameters for inode allocation.
- */
-#define XFS_IALLOC_INODES(mp)  (mp)->m_ialloc_inos
-#define XFS_IALLOC_BLOCKS(mp)  (mp)->m_ialloc_blks
-
-/*
- * Move inodes in clusters of this size.
- */
+/* Move inodes in clusters of this size */
 #define XFS_INODE_BIG_CLUSTER_SIZE     8192
-#define XFS_INODE_CLUSTER_SIZE(mp)     (mp)->m_inode_cluster_size
+
+/* Calculate and return the number of filesystem blocks per inode cluster */
+static inline int
+xfs_icluster_size_fsb(
+       struct xfs_mount        *mp)
+{
+       if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
+               return 1;
+       return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
+}

 /*
  * Make an inode pointer out of the buffer/offset.
...
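As a sanity check on the refactor (not part of the patch), the standalone sketch below re-creates the helper against a reduced, hypothetical mock of the mount structure and asserts that xfs_icluster_size_fsb() << sb_inopblog reproduces the ninodes value the removed open-coded branches computed, for both the small-block and the large-block case. The mp_mock type and both sample geometries are invented for illustration.

/*
 * Standalone equivalence check; field names mirror the XFS superblock,
 * but the struct is a hypothetical reduction for testing only.
 */
#include <assert.h>

struct mp_mock {
        unsigned int sb_blocksize;              /* bytes per fs block */
        unsigned int sb_blocklog;               /* log2(sb_blocksize) */
        unsigned int sb_inopblock;              /* inodes per block */
        unsigned int sb_inopblog;               /* log2(sb_inopblock) */
        unsigned int m_inode_cluster_size;      /* bytes per inode cluster */
};

/* same logic as the new xfs_icluster_size_fsb() helper */
static int icluster_size_fsb(struct mp_mock *mp)
{
        if (mp->sb_blocksize >= mp->m_inode_cluster_size)
                return 1;
        return mp->m_inode_cluster_size >> mp->sb_blocklog;
}

int main(void)
{
        /* 4k blocks, 512-byte inodes, 8k clusters: old "else" branch */
        struct mp_mock mp = { 4096, 12, 8, 3, 8192 };
        int blks = icluster_size_fsb(&mp);                      /* 2 */
        int old_ninodes = (mp.m_inode_cluster_size / mp.sb_blocksize) *
                          mp.sb_inopblock;                      /* 16 */
        assert((blks << mp.sb_inopblog) == old_ninodes);

        /* 64k blocks, 512-byte inodes, 8k clusters: old first branch */
        struct mp_mock big = { 65536, 16, 128, 7, 8192 };
        assert((icluster_size_fsb(&big) << big.sb_inopblog) ==
               (int)big.sb_inopblock);                          /* 128 */
        return 0;
}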
@@ -2141,8 +2141,8 @@ xfs_ifree_cluster(
 {
        xfs_mount_t             *mp = free_ip->i_mount;
        int                     blks_per_cluster;
+       int                     inodes_per_cluster;
        int                     nbufs;
-       int                     ninodes;
        int                     i, j;
        xfs_daddr_t             blkno;
        xfs_buf_t               *bp;
@@ -2152,18 +2152,11 @@ xfs_ifree_cluster(
        struct xfs_perag        *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
-       if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-               blks_per_cluster = 1;
-               ninodes = mp->m_sb.sb_inopblock;
-               nbufs = XFS_IALLOC_BLOCKS(mp);
-       } else {
-               blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-                                       mp->m_sb.sb_blocksize;
-               ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-               nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
-       }
+       blks_per_cluster = xfs_icluster_size_fsb(mp);
+       inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+       nbufs = mp->m_ialloc_blks / blks_per_cluster;

-       for (j = 0; j < nbufs; j++, inum += ninodes) {
+       for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
                blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
                                         XFS_INO_TO_AGBNO(mp, inum));
@@ -2225,7 +2218,7 @@ xfs_ifree_cluster(
                 * transaction stale above, which means there is no point in
                 * even trying to lock them.
                 */
-               for (i = 0; i < ninodes; i++) {
+               for (i = 0; i < inodes_per_cluster; i++) {
 retry:
                        rcu_read_lock();
                        ip = radix_tree_lookup(&pag->pag_ici_root,
@@ -2906,13 +2899,13 @@ xfs_iflush_cluster(
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

-       inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
+       inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
        ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
        ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
        if (!ilist)
                goto out_put;

-       mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
+       mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
        first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
        rcu_read_lock();
        /* really need a gang lookup range call here */
...
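The mask arithmetic in xfs_iflush_cluster() is untouched here beyond the macro-to-field substitution; for readers following along, a tiny example of what it computes, with an assumed geometry (8k clusters, 256-byte inodes, so sb_inodelog = 8):

/*
 * Illustration only, hypothetical geometry: the mask rounds an AG inode
 * number down to the first inode of its cluster, which is where
 * xfs_iflush_cluster() starts its radix-tree scan.
 */
#include <assert.h>

int main(void)
{
        unsigned int m_inode_cluster_size = 8192;
        unsigned int sb_inodelog = 8;                   /* log2(256) */
        unsigned int inodes_per_cluster =
                m_inode_cluster_size >> sb_inodelog;    /* 32 */
        unsigned int mask = ~(inodes_per_cluster - 1);
        unsigned int agino = 100;

        /* inode 100 lives in the cluster spanning inodes [96, 127] */
        assert((agino & mask) == 96);
        return 0;
}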
@@ -209,9 +209,8 @@ xfs_bulkstat(
        xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
        xfs_inobt_rec_incore_t  *irbufend; /* end of good irec buffer entries */
        xfs_ino_t               lastino; /* last inode number returned */
-       int                     nbcluster; /* # of blocks in a cluster */
-       int                     nicluster; /* # of inodes in a cluster */
-       int                     nimask; /* mask for inode clusters */
+       int                     blks_per_cluster; /* # of blocks per cluster */
+       int                     inodes_per_cluster;/* # of inodes per cluster */
        int                     nirbuf; /* size of irbuf */
        int                     rval;   /* return value error code */
        int                     tmp;    /* result value from btree calls */
@@ -243,11 +242,8 @@ xfs_bulkstat(
        *done = 0;
        fmterror = 0;
        ubufp = ubuffer;
-       nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
-               mp->m_sb.sb_inopblock :
-               (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
-       nimask = ~(nicluster - 1);
-       nbcluster = nicluster >> mp->m_sb.sb_inopblog;
+       blks_per_cluster = xfs_icluster_size_fsb(mp);
+       inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
        irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
        if (!irbuf)
                return ENOMEM;
@@ -390,12 +386,12 @@ xfs_bulkstat(
                                agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
                                for (chunkidx = 0;
                                     chunkidx < XFS_INODES_PER_CHUNK;
-                                    chunkidx += nicluster,
-                                    agbno += nbcluster) {
-                                       if (xfs_inobt_maskn(chunkidx, nicluster)
-                                                       & ~r.ir_free)
+                                    chunkidx += inodes_per_cluster,
+                                    agbno += blks_per_cluster) {
+                                       if (xfs_inobt_maskn(chunkidx,
+                                                       inodes_per_cluster) & ~r.ir_free)
                                                xfs_btree_reada_bufs(mp, agno,
-                                                      agbno, nbcluster,
+                                                      agbno, blks_per_cluster,
                                                       &xfs_inode_buf_ops);
                                }
                                blk_finish_plug(&plug);
...
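The readahead loop above now walks an inode chunk in cluster-sized steps using the renamed variables; a loop-shape sketch under an assumed geometry (4k blocks, 256-byte inodes, 8k clusters, so blks_per_cluster = 2 and inodes_per_cluster = 32; the starting agbno is made up):

/* Hypothetical trace of the loop bounds only; no XFS calls involved. */
#include <stdio.h>

#define XFS_INODES_PER_CHUNK    64

int main(void)
{
        int blks_per_cluster = 2;       /* assumed */
        int inodes_per_cluster = 32;    /* assumed */
        int agbno = 1000;               /* made-up chunk start block */

        for (int chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
             chunkidx += inodes_per_cluster, agbno += blks_per_cluster)
                printf("readahead %d blocks at agbno %d (inodes %d-%d)\n",
                       blks_per_cluster, agbno,
                       chunkidx, chunkidx + inodes_per_cluster - 1);
        return 0;
}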
@@ -2523,19 +2523,19 @@ xlog_recover_buffer_pass2(
         *
         * Also make sure that only inode buffers with good sizes stay in
         * the buffer cache.  The kernel moves inodes in buffers of 1 block
-        * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
+        * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
         * buffers in the log can be a different size if the log was generated
         * by an older kernel using unclustered inode buffers or a newer kernel
         * running with a different inode cluster size.  Regardless, if the
-        * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
-        * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
+        * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
+        * for *our* value of mp->m_inode_cluster_size, then we need to keep
         * the buffer out of the buffer cache so that the buffer won't
         * overlap with future reads of those inodes.
         */
        if (XFS_DINODE_MAGIC ==
            be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
            (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
-                       (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
+                       (__uint32_t)log->l_mp->m_inode_cluster_size))) {
                xfs_buf_stale(bp);
                error = xfs_bwrite(bp);
        } else {
@@ -3208,10 +3208,10 @@ xlog_recover_do_icreate_pass2(
        }

        /* existing allocation is fixed value */
-       ASSERT(count == XFS_IALLOC_INODES(mp));
-       ASSERT(length == XFS_IALLOC_BLOCKS(mp));
-       if (count != XFS_IALLOC_INODES(mp) ||
-           length != XFS_IALLOC_BLOCKS(mp)) {
+       ASSERT(count == mp->m_ialloc_inos);
+       ASSERT(length == mp->m_ialloc_blks);
+       if (count != mp->m_ialloc_inos ||
+           length != mp->m_ialloc_blks) {
                xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
                return EINVAL;
        }
...
@@ -174,7 +174,7 @@ xfs_calc_itruncate_reservation(
                    xfs_calc_buf_res(5, 0) +
                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                     XFS_FSB_TO_B(mp, 1)) +
-                   xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+                   xfs_calc_buf_res(2 + mp->m_ialloc_blks +
                                     mp->m_in_maxlevels, 0)));
 }
@@ -282,7 +282,7 @@ xfs_calc_create_resv_modify(
  * For create we can allocate some inodes giving:
  *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
  *    the superblock for the nlink flag: sector size
- *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ *    the inode blocks allocated: mp->m_ialloc_blks * blocksize
  *    the inode btree: max depth * blocksize
  *    the allocation btrees: 2 trees * (max depth - 1) * block size
  */
@@ -292,7 +292,7 @@ xfs_calc_create_resv_alloc(
 {
        return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                mp->m_sb.sb_sectsize +
-               xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) +
+               xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
@@ -385,9 +385,9 @@ xfs_calc_ifree_reservation(
                xfs_calc_inode_res(mp, 1) +
                xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
-               max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) +
+               max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size) +
                xfs_calc_buf_res(1, 0) +
-               xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+               xfs_calc_buf_res(2 + mp->m_ialloc_blks +
                                 mp->m_in_maxlevels, 0) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
...
@@ -47,7 +47,7 @@
 #define XFS_DIRREMOVE_SPACE_RES(mp)    \
        XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
 #define XFS_IALLOC_SPACE_RES(mp)       \
-       (XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1)
+       ((mp)->m_ialloc_blks + (mp)->m_in_maxlevels - 1)

 /*
  * Space reservation values for various transactions.
...
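For a sense of scale, a worked example of the reworked XFS_IALLOC_SPACE_RES() under an assumed default-style geometry (256-byte inodes, 4k blocks, 64-inode chunks, and an assumed inode btree height of 3; none of these numbers come from the commit itself):

/* Hypothetical numbers throughout; only the final formula is from the patch. */
#include <stdio.h>

int main(void)
{
        unsigned int sb_inodesize = 256;        /* bytes per inode (assumed) */
        unsigned int sb_blocksize = 4096;       /* bytes per fs block (assumed) */
        unsigned int inodes_per_chunk = 64;     /* XFS_INODES_PER_CHUNK */
        unsigned int m_in_maxlevels = 3;        /* assumed inobt max height */

        /* roughly what mount-time setup caches in mp->m_ialloc_blks */
        unsigned int m_ialloc_blks =
                inodes_per_chunk * sb_inodesize / sb_blocksize; /* = 4 */

        /* XFS_IALLOC_SPACE_RES(mp) after this patch */
        printf("%u blocks\n", m_ialloc_blks + m_in_maxlevels - 1); /* 6 */
        return 0;
}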