Commit d63192c8 authored by Darrick J. Wong

xfs: refactor xfs_qm_dqtobp and xfs_qm_dqalloc

Separate the disk dquot read and allocation functionality into
two helper functions, then refactor dqread to call them directly.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 617cd5c1
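
For orientation, here is the refactored read path in condensed form, assembled from the new-side hunks of the diff below (declarations trimmed, tracepoints mostly omitted; a sketch, not the verbatim patch):

/*
 * Condensed sketch of the refactored xfs_qm_dqread() flow; see the hunks
 * below for the complete change.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);

	/* Map and read the on-disk dquot buffer... */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	/* ...and allocate the backing block only if the caller asked for it. */
	if (error == -ENOENT && (flags & XFS_QMOPT_DQALLOC))
		error = xfs_qm_dqread_alloc(mp, dqp, &bp);
	if (error)
		goto err;

	/* Copy the on-disk dquot into the incore dquot, then drop the buffer. */
	xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);

	*dqpp = dqp;
	return 0;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}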
@@ -288,49 +288,43 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 }
 
 /*
- * Allocate a block and fill it with dquots.
- * This is called when the bmapi finds a hole.
+ * Ensure that the given in-core dquot has a buffer on disk backing it, and
+ * return the buffer. This is called when the bmapi finds a hole.
  */
 STATIC int
-xfs_qm_dqalloc(
-	xfs_trans_t		**tpp,
-	xfs_mount_t		*mp,
-	xfs_dquot_t		*dqp,
-	xfs_inode_t		*quotip,
-	xfs_fileoff_t		offset_fsb,
-	xfs_buf_t		**O_bpp)
+xfs_dquot_disk_alloc(
+	struct xfs_trans	**tpp,
+	struct xfs_dquot	*dqp,
+	struct xfs_buf		**bpp)
 {
-	xfs_fsblock_t		firstblock;
-	struct xfs_defer_ops	dfops;
-	xfs_bmbt_irec_t		map;
-	int			nmaps, error;
-	xfs_buf_t		*bp;
-	xfs_trans_t		*tp = *tpp;
-
-	ASSERT(tp != NULL);
+	struct xfs_bmbt_irec	map;
+	struct xfs_defer_ops	dfops;
+	struct xfs_mount	*mp = (*tpp)->t_mountp;
+	struct xfs_buf		*bp;
+	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
+	xfs_fsblock_t		firstblock;
+	int			nmaps = 1;
+	int			error;
 
 	trace_xfs_dqalloc(dqp);
 
-	/*
-	 * Initialize the bmap freelist prior to calling bmapi code.
-	 */
 	xfs_defer_init(&dfops, &firstblock);
 	xfs_ilock(quotip, XFS_ILOCK_EXCL);
-
-	/*
-	 * Return if this type of quotas is turned off while we didn't
-	 * have an inode lock
-	 */
 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
+		/*
+		 * Return if this type of quotas is turned off while we didn't
+		 * have an inode lock
+		 */
 		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 		return -ESRCH;
 	}
 
-	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
-	nmaps = 1;
-	error = xfs_bmapi_write(tp, quotip, offset_fsb,
+	/* Create the block mapping. */
+	xfs_trans_ijoin(*tpp, quotip, XFS_ILOCK_EXCL);
+	error = xfs_bmapi_write(*tpp, quotip, dqp->q_fileoffset,
 			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
 			&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
 			&map, &nmaps, &dfops);
 	if (error)
 		goto error0;
 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -344,10 +338,8 @@ xfs_qm_dqalloc(
 	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 
 	/* now we can just get the buffer (there's nothing to read yet) */
-	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
-			       dqp->q_blkno,
-			       mp->m_quotainfo->qi_dqchunklen,
-			       0);
+	bp = xfs_trans_get_buf(*tpp, mp->m_ddev_targp, dqp->q_blkno,
+			mp->m_quotainfo->qi_dqchunklen, 0);
 	if (!bp) {
 		error = -ENOMEM;
 		goto error1;
@@ -358,8 +350,9 @@ xfs_qm_dqalloc(
 	 * Make a chunk of dquots out of this buffer and log
 	 * the entire thing.
 	 */
-	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
+	xfs_qm_init_dquot_blk(*tpp, mp, be32_to_cpu(dqp->q_core.d_id),
 			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
+	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 
 	/*
 	 * Hold the buffer and join it to the dfops so that we'll still own
@@ -379,7 +372,7 @@ xfs_qm_dqalloc(
 	 * transaction, so we must _buf_relse it.
 	 *
 	 * If everything succeeds, the caller of this function is returned a
-	 * buffer that is locked and joined to the transaction.  The caller
+	 * buffer that is locked and held to the transaction.  The caller
 	 * is responsible for unlocking any buffer passed back, either
 	 * manually or by committing the transaction.
 	 */
@@ -395,8 +388,7 @@ xfs_qm_dqalloc(
 		xfs_buf_relse(bp);
 		goto error1;
 	}
-	xfs_trans_bhold_release(*tpp, bp);
-	*O_bpp = bp;
+	*bpp = bp;
 	return 0;
 
 error1:
@@ -406,32 +398,24 @@ xfs_qm_dqalloc(
 }
 
 /*
- * Maps a dquot to the buffer containing its on-disk version.
- * This returns a ptr to the buffer containing the on-disk dquot
- * in the bpp param, and a ptr to the on-disk dquot within that buffer
+ * Read in the in-core dquot's on-disk metadata and return the buffer.
+ * Returns ENOENT to signal a hole.
  */
 STATIC int
-xfs_qm_dqtobp(
-	xfs_trans_t		**tpp,
-	xfs_dquot_t		*dqp,
-	xfs_disk_dquot_t	**O_ddpp,
-	xfs_buf_t		**O_bpp,
-	uint			flags)
+xfs_dquot_disk_read(
+	struct xfs_mount	*mp,
+	struct xfs_dquot	*dqp,
+	struct xfs_buf		**bpp)
 {
 	struct xfs_bmbt_irec	map;
-	int			nmaps = 1, error;
 	struct xfs_buf		*bp;
-	struct xfs_inode	*quotip;
-	struct xfs_mount	*mp = dqp->q_mount;
-	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
-	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
+	struct xfs_inode	*quotip = xfs_quota_inode(mp, dqp->dq_flags);
 	uint			lock_mode;
+	int			nmaps = 1;
+	int			error;
 
-	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
-	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
-
 	lock_mode = xfs_ilock_data_map_shared(quotip);
-	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
+	if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
 		/*
 		 * Return if this type of quotas is turned off while we
 		 * didn't have the quota inode lock.
@@ -444,57 +428,36 @@ xfs_qm_dqtobp(
 	 * Find the block map; no allocations yet
 	 */
 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 	xfs_iunlock(quotip, lock_mode);
 	if (error)
 		return error;
 
 	ASSERT(nmaps == 1);
-	ASSERT(map.br_blockcount == 1);
+	ASSERT(map.br_blockcount >= 1);
+	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+	if (map.br_startblock == HOLESTARTBLOCK)
+		return -ENOENT;
+
+	trace_xfs_dqtobp_read(dqp);
 
 	/*
-	 * Offset of dquot in the (fixed sized) dquot chunk.
+	 * store the blkno etc so that we don't have to do the
+	 * mapping all the time
 	 */
-	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
-			sizeof(xfs_dqblk_t);
-
-	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
-	if (map.br_startblock == HOLESTARTBLOCK) {
-		/*
-		 * We don't allocate unless we're asked to
-		 */
-		if (!(flags & XFS_QMOPT_DQALLOC))
-			return -ENOENT;
-
-		ASSERT(tp);
-		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
-					dqp->q_fileoffset, &bp);
-		if (error)
-			return error;
-		tp = *tpp;
-	} else {
-		trace_xfs_dqtobp_read(dqp);
-
-		/*
-		 * store the blkno etc so that we don't have to do the
-		 * mapping all the time
-		 */
-		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 
-		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
-					   dqp->q_blkno,
-					   mp->m_quotainfo->qi_dqchunklen,
-					   0, &bp, &xfs_dquot_buf_ops);
-		if (error) {
-			ASSERT(bp == NULL);
-			return error;
-		}
+	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
+			&xfs_dquot_buf_ops);
+	if (error) {
+		ASSERT(bp == NULL);
+		return error;
 	}
 
 	ASSERT(xfs_buf_islocked(bp));
-	*O_bpp = bp;
-	*O_ddpp = bp->b_addr + dqp->q_bufoffset;
+	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
+	*bpp = bp;
 
 	return 0;
 }
@@ -516,6 +479,12 @@ xfs_dquot_alloc(
 	INIT_LIST_HEAD(&dqp->q_lru);
 	mutex_init(&dqp->q_qlock);
 	init_waitqueue_head(&dqp->q_pinwait);
+	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
+
+	/*
+	 * Offset of dquot in the (fixed sized) dquot chunk.
+	 */
+	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
+			sizeof(xfs_dqblk_t);
 
 	/*
 	 * Because we want to use a counting completion, complete
@@ -554,8 +523,10 @@ xfs_dquot_alloc(
 STATIC void
 xfs_dquot_from_disk(
 	struct xfs_dquot	*dqp,
-	struct xfs_disk_dquot	*ddqp)
+	struct xfs_buf		*bp)
 {
+	struct xfs_disk_dquot	*ddqp = bp->b_addr + dqp->q_bufoffset;
+
 	/* copy everything from disk dquot to the incore dquot */
 	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
@@ -571,6 +542,44 @@ xfs_dquot_from_disk(
 	xfs_dquot_set_prealloc_limits(dqp);
 }
 
+/* Allocate and initialize the dquot buffer for this in-core dquot. */
+static int
+xfs_qm_dqread_alloc(
+	struct xfs_mount	*mp,
+	struct xfs_dquot	*dqp,
+	struct xfs_buf		**bpp)
+{
+	struct xfs_trans	*tp;
+	struct xfs_buf		*bp;
+	int			error;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
+			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
+	if (error)
+		goto err;
+
+	error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
+	if (error)
+		goto err_cancel;
+
+	error = xfs_trans_commit(tp);
+	if (error) {
+		/*
+		 * Buffer was held to the transaction, so we have to unlock it
+		 * manually here because we're not passing it back.
+		 */
+		xfs_buf_relse(bp);
+		goto err;
+	}
+	*bpp = bp;
+	return 0;
+
+err_cancel:
+	xfs_trans_cancel(tp);
+err:
+	return error;
+}
+
 /*
  * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
  * and release the buffer immediately.
@@ -583,74 +592,39 @@ xfs_qm_dqread(
 	xfs_dqid_t		id,
 	uint			type,
 	uint			flags,
-	struct xfs_dquot	**O_dqpp)
+	struct xfs_dquot	**dqpp)
 {
 	struct xfs_dquot	*dqp;
-	struct xfs_disk_dquot	*ddqp;
 	struct xfs_buf		*bp;
-	struct xfs_trans	*tp = NULL;
 	int			error;
 
 	dqp = xfs_dquot_alloc(mp, id, type);
 	trace_xfs_dqread(dqp);
 
-	if (flags & XFS_QMOPT_DQALLOC) {
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
-				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
-		if (error)
-			goto error0;
-	}
-
-	/*
-	 * get a pointer to the on-disk dquot and the buffer containing it
-	 * dqp already knows its own type (GROUP/USER).
-	 */
-	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
-	if (error) {
-		/*
-		 * This can happen if quotas got turned off (ESRCH),
-		 * or if the dquot didn't exist on disk and we ask to
-		 * allocate (ENOENT).
-		 */
-		trace_xfs_dqread_fail(dqp);
-		goto error1;
-	}
-
-	xfs_dquot_from_disk(dqp, ddqp);
-
-	/* Mark the buf so that this will stay incore a little longer */
-	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
+	/* Try to read the buffer, allocating if necessary. */
+	error = xfs_dquot_disk_read(mp, dqp, &bp);
+	if (error == -ENOENT && (flags & XFS_QMOPT_DQALLOC))
+		error = xfs_qm_dqread_alloc(mp, dqp, &bp);
+	if (error)
+		goto err;
 
 	/*
-	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
-	 * So we need to release with xfs_trans_brelse().
-	 * The strategy here is identical to that of inodes; we lock
-	 * the dquot in xfs_qm_dqget() before making it accessible to
-	 * others. This is because dquots, like inodes, need a good level of
-	 * concurrency, and we don't want to take locks on the entire buffers
-	 * for dquot accesses.
-	 * Note also that the dquot buffer may even be dirty at this point, if
-	 * this particular dquot was repaired. We still aren't afraid to
-	 * brelse it because we have the changes incore.
+	 * At this point we should have a clean locked buffer.  Copy the data
+	 * to the incore dquot and release the buffer since the incore dquot
+	 * has its own locking protocol so we needn't tie up the buffer any
+	 * further.
 	 */
 	ASSERT(xfs_buf_islocked(bp));
-	xfs_trans_brelse(tp, bp);
-
-	if (tp) {
-		error = xfs_trans_commit(tp);
-		if (error)
-			goto error0;
-	}
+	xfs_dquot_from_disk(dqp, bp);
+	xfs_buf_relse(bp);
 
-	*O_dqpp = dqp;
+	*dqpp = dqp;
 	return error;
 
-error1:
-	if (tp)
-		xfs_trans_cancel(tp);
-error0:
+err:
+	trace_xfs_dqread_fail(dqp);
 	xfs_qm_dqdestroy(dqp);
-	*O_dqpp = NULL;
+	*dqpp = NULL;
 	return error;
 }