Commit ff14ee42 authored by Dave Chinner

Merge branch 'xfs-misc-fixes-1-for-3.16' into for-next

parents b7676929 8cfcc3e5
@@ -278,6 +278,17 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 	return ret;
 }
 
+static int quota_rmxquota(struct super_block *sb, void __user *addr)
+{
+	__u32 flags;
+
+	if (copy_from_user(&flags, addr, sizeof(flags)))
+		return -EFAULT;
+	if (!sb->s_qcop->rm_xquota)
+		return -ENOSYS;
+	return sb->s_qcop->rm_xquota(sb, flags);
+}
+
 /* Copy parameters and call proper function */
 static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 		       void __user *addr, struct path *path)
@@ -316,8 +327,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 		return sb->s_qcop->quota_sync(sb, type);
 	case Q_XQUOTAON:
 	case Q_XQUOTAOFF:
-	case Q_XQUOTARM:
 		return quota_setxstate(sb, cmd, addr);
+	case Q_XQUOTARM:
+		return quota_rmxquota(sb, addr);
 	case Q_XGETQSTAT:
 		return quota_getxstate(sb, addr);
 	case Q_XGETQSTATV:
...
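With this change Q_XQUOTARM is dispatched through the new ->rm_xquota method instead of ->set_xstate, and the quota-type flag mask is copied in from user space. As a rough userspace sketch of the resulting interface (not part of this commit; the device path is a placeholder and the filesystem's quota accounting is assumed to be off), truncating the unused user quota file on an XFS filesystem could look like:

	#include <stdio.h>
	#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
	#include <linux/dqblk_xfs.h>	/* Q_XQUOTARM, FS_USER_QUOTA */

	int main(void)
	{
		__u32 flags = FS_USER_QUOTA;	/* which quota files to truncate */

		/* "/dev/sdb1" is a placeholder block device backing an XFS mount */
		if (quotactl(QCMD(Q_XQUOTARM, USRQUOTA), "/dev/sdb1", 0,
			     (caddr_t)&flags) < 0) {
			perror("quotactl(Q_XQUOTARM)");
			return 1;
		}
		return 0;
	}

Note that for Q_XQUOTARM the quota type encoded by QCMD() is ignored; the flag mask passed through addr selects which quota files are affected.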
@@ -456,7 +456,7 @@ xfs_dir2_leaf_readbuf(
 		/*
 		 * Advance offset through the mapping table.
 		 */
-		for (j = 0; j < mp->m_dirblkfsbs; j++) {
+		for (j = 0; j < mp->m_dirblkfsbs; j += length ) {
 			/*
 			 * The rest of this extent but not more than a dir
 			 * block.
@@ -464,7 +464,6 @@ xfs_dir2_leaf_readbuf(
 			length = min_t(int, mp->m_dirblkfsbs,
 				       map[mip->ra_index].br_blockcount -
 				       mip->ra_offset);
-			j += length;
 			mip->ra_offset += length;
 			/*
...
@@ -832,47 +832,6 @@ xfs_qm_dqget(
 	return (0);
 }
 
-STATIC void
-xfs_qm_dqput_final(
-	struct xfs_dquot	*dqp)
-{
-	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
-	struct xfs_dquot	*gdqp;
-	struct xfs_dquot	*pdqp;
-
-	trace_xfs_dqput_free(dqp);
-
-	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
-		XFS_STATS_INC(xs_qm_dquot_unused);
-
-	/*
-	 * If we just added a udquot to the freelist, then we want to release
-	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
-	 * keep the gdquot/pdquot from getting reclaimed.
-	 */
-	gdqp = dqp->q_gdquot;
-	if (gdqp) {
-		xfs_dqlock(gdqp);
-		dqp->q_gdquot = NULL;
-	}
-
-	pdqp = dqp->q_pdquot;
-	if (pdqp) {
-		xfs_dqlock(pdqp);
-		dqp->q_pdquot = NULL;
-	}
-
-	xfs_dqunlock(dqp);
-
-	/*
-	 * If we had a group/project quota hint, release it now.
-	 */
-	if (gdqp)
-		xfs_qm_dqput(gdqp);
-	if (pdqp)
-		xfs_qm_dqput(pdqp);
-}
-
 /*
  * Release a reference to the dquot (decrement ref-count) and unlock it.
  *
@@ -888,10 +847,14 @@ xfs_qm_dqput(
 	trace_xfs_dqput(dqp);
 
-	if (--dqp->q_nrefs > 0)
-		xfs_dqunlock(dqp);
-	else
-		xfs_qm_dqput_final(dqp);
+	if (--dqp->q_nrefs == 0) {
+		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
+		trace_xfs_dqput_free(dqp);
+
+		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
+			XFS_STATS_INC(xs_qm_dquot_unused);
+	}
+	xfs_dqunlock(dqp);
 }
 
 /*
...
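The simplified xfs_qm_dqput() now just drops the reference under the dquot lock and, when the last reference goes away, parks the dquot on the quota LRU for later reclaim; there are no more group/project hint references to unwind. A schematic user-space analogue of that put path (illustrative only, not XFS code; all names are made up) is:

	#include <pthread.h>

	struct obj {
		pthread_mutex_t	lock;
		int		nrefs;
		struct obj	*lru_next;	/* freelist link, reclaimed later */
	};

	static struct obj *lru_head;
	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

	static void obj_put(struct obj *o)
	{
		pthread_mutex_lock(&o->lock);
		if (--o->nrefs == 0) {
			/* last reference: queue for reclaim instead of freeing */
			pthread_mutex_lock(&lru_lock);
			o->lru_next = lru_head;
			lru_head = o;
			pthread_mutex_unlock(&lru_lock);
		}
		pthread_mutex_unlock(&o->lock);
	}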
@@ -52,8 +52,6 @@ typedef struct xfs_dquot {
 	int		 q_bufoffset;	/* off of dq in buffer (# dquots) */
 	xfs_fileoff_t	 q_fileoffset;	/* offset in quotas file */
 
-	struct xfs_dquot*q_gdquot;	/* group dquot, hint only */
-	struct xfs_dquot*q_pdquot;	/* project dquot, hint only */
 	xfs_disk_dquot_t q_core;	/* actual usage & quotas */
 	xfs_dq_logitem_t q_logitem;	/* dquot log item */
 	xfs_qcnt_t	 q_res_bcount;	/* total regular nblks used+reserved */
...
@@ -543,10 +543,11 @@ xfs_attrmulti_by_handle(
 	ops = memdup_user(am_hreq.ops, size);
 	if (IS_ERR(ops)) {
-		error = PTR_ERR(ops);
+		error = -PTR_ERR(ops);
 		goto out_dput;
 	}
 
+	error = ENOMEM;
 	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
 	if (!attr_name)
 		goto out_kfree_ops;
@@ -556,7 +557,7 @@ xfs_attrmulti_by_handle(
 		ops[i].am_error = strncpy_from_user((char *)attr_name,
 				ops[i].am_attrname, MAXNAMELEN);
 		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
-			error = -ERANGE;
+			error = ERANGE;
 		if (ops[i].am_error < 0)
 			break;
...
@@ -424,10 +424,11 @@ xfs_compat_attrmulti_by_handle(
 	ops = memdup_user(compat_ptr(am_hreq.ops), size);
 	if (IS_ERR(ops)) {
-		error = PTR_ERR(ops);
+		error = -PTR_ERR(ops);
 		goto out_dput;
 	}
 
+	error = ENOMEM;
 	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
 	if (!attr_name)
 		goto out_kfree_ops;
@@ -438,7 +439,7 @@ xfs_compat_attrmulti_by_handle(
 				compat_ptr(ops[i].am_attrname),
 				MAXNAMELEN);
 		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
-			error = -ERANGE;
+			error = ERANGE;
 		if (ops[i].am_error < 0)
 			break;
...
@@ -829,22 +829,34 @@ xfs_setattr_size(
 	 */
 	inode_dio_wait(inode);
 
+	/*
+	 * Do all the page cache truncate work outside the transaction context
+	 * as the "lock" order is page lock->log space reservation. i.e.
+	 * locking pages inside the transaction can ABBA deadlock with
+	 * writeback. We have to do the VFS inode size update before we truncate
+	 * the pagecache, however, to avoid racing with page faults beyond the
+	 * new EOF they are not serialised against truncate operations except by
+	 * page locks and size updates.
+	 *
+	 * Hence we are in a situation where a truncate can fail with ENOMEM
+	 * from xfs_trans_reserve(), but having already truncated the in-memory
+	 * version of the file (i.e. made user visible changes). There's not
+	 * much we can do about this, except to hope that the caller sees ENOMEM
+	 * and retries the truncate operation.
+	 */
 	error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
 	if (error)
 		return error;
+	truncate_setsize(inode, newsize);
 
 	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
 	if (error)
 		goto out_trans_cancel;
 
-	truncate_setsize(inode, newsize);
 	commit_flags = XFS_TRANS_RELEASE_LOG_RES;
 	lock_flags |= XFS_ILOCK_EXCL;
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, 0);
 
 	/*
...
@@ -3952,11 +3952,14 @@ xfs_log_force_umount(
 		retval = xlog_state_ioerror(log);
 		spin_unlock(&log->l_icloglock);
 	}
 
 	/*
-	 * Wake up everybody waiting on xfs_log_force.
-	 * Callback all log item committed functions as if the
-	 * log writes were completed.
+	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
+	 * as if the log writes were completed. The abort handling in the log
+	 * item committed callback functions will do this again under lock to
+	 * avoid races.
 	 */
+	wake_up_all(&log->l_cilp->xc_commit_wait);
 	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
 
 #ifdef XFSERRORDEBUG
...
@@ -385,7 +385,15 @@ xlog_cil_committed(
 	xfs_extent_busy_clear(mp, &ctx->busy_extents,
 			      (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
 
+	/*
+	 * If we are aborting the commit, wake up anyone waiting on the
+	 * committing list. If we don't, then a shutdown we can leave processes
+	 * waiting in xlog_cil_force_lsn() waiting on a sequence commit that
+	 * will never happen because we aborted it.
+	 */
 	spin_lock(&ctx->cil->xc_push_lock);
+	if (abort)
+		wake_up_all(&ctx->cil->xc_commit_wait);
 	list_del(&ctx->committing);
 	spin_unlock(&ctx->cil->xc_push_lock);
@@ -563,9 +571,19 @@ xlog_cil_push(
 restart:
 	spin_lock(&cil->xc_push_lock);
 	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
+		/*
+		 * Avoid getting stuck in this loop because we were woken by the
+		 * shutdown, but then went back to sleep once already in the
+		 * shutdown state.
+		 */
+		if (XLOG_FORCED_SHUTDOWN(log)) {
+			spin_unlock(&cil->xc_push_lock);
+			goto out_abort_free_ticket;
+		}
+
 		/*
 		 * Higher sequences will wait for this one so skip them.
-		 * Don't wait for own own sequence, either.
+		 * Don't wait for our own sequence, either.
 		 */
 		if (new_ctx->sequence >= ctx->sequence)
 			continue;
@@ -810,6 +828,13 @@ xlog_cil_force_lsn(
 	 */
 	spin_lock(&cil->xc_push_lock);
 	list_for_each_entry(ctx, &cil->xc_committing, committing) {
+		/*
+		 * Avoid getting stuck in this loop because we were woken by the
+		 * shutdown, but then went back to sleep once already in the
+		 * shutdown state.
+		 */
+		if (XLOG_FORCED_SHUTDOWN(log))
+			goto out_shutdown;
 		if (ctx->sequence > sequence)
 			continue;
 		if (!ctx->commit_lsn) {
@@ -833,14 +858,12 @@ xlog_cil_force_lsn(
 	 * push sequence after the above wait loop and the CIL still contains
 	 * dirty objects.
 	 *
-	 * When the push occurs, it will empty the CIL and
-	 * atomically increment the currect sequence past the push sequence and
-	 * move it into the committing list. Of course, if the CIL is clean at
-	 * the time of the push, it won't have pushed the CIL at all, so in that
-	 * case we should try the push for this sequence again from the start
-	 * just in case.
+	 * When the push occurs, it will empty the CIL and atomically increment
+	 * the currect sequence past the push sequence and move it into the
+	 * committing list. Of course, if the CIL is clean at the time of the
+	 * push, it won't have pushed the CIL at all, so in that case we should
+	 * try the push for this sequence again from the start just in case.
 	 */
 	if (sequence == cil->xc_current_sequence &&
 	    !list_empty(&cil->xc_cil)) {
 		spin_unlock(&cil->xc_push_lock);
@@ -849,6 +872,17 @@ xlog_cil_force_lsn(
 	spin_unlock(&cil->xc_push_lock);
 	return commit_lsn;
+
+	/*
+	 * We detected a shutdown in progress. We need to trigger the log force
+	 * to pass through it's iclog state machine error handling, even though
+	 * we are already in a shutdown state. Hence we can't return
+	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
+	 * LSN is already stable), so we return a zero LSN instead.
+	 */
+out_shutdown:
+	spin_unlock(&cil->xc_push_lock);
+	return 0;
 }
 
 /*
...
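The CIL changes above all follow one pattern: a waiter sleeping on the committing list must re-check for a forced shutdown on every wakeup, and the abort path must broadcast on xc_commit_wait, otherwise a task woken by the shutdown can simply go back to sleep on a sequence that will never commit. A minimal pthread analogue of that pattern (illustrative only, not kernel code; all names are made up):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  commit_wait = PTHREAD_COND_INITIALIZER;
	static unsigned long committed_seq;
	static bool shutdown;

	/* Wait until "seq" has committed; return false if a shutdown happens. */
	static bool wait_for_seq(unsigned long seq)
	{
		pthread_mutex_lock(&lock);
		while (committed_seq < seq) {
			if (shutdown) {		/* re-check on every wakeup */
				pthread_mutex_unlock(&lock);
				return false;
			}
			pthread_cond_wait(&commit_wait, &lock);
		}
		pthread_mutex_unlock(&lock);
		return true;
	}

	/* Abort path: mark the shutdown and wake every waiter. */
	static void force_shutdown(void)
	{
		pthread_mutex_lock(&lock);
		shutdown = true;
		pthread_cond_broadcast(&commit_wait);
		pthread_mutex_unlock(&lock);
	}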
@@ -192,47 +192,6 @@ xfs_qm_dqpurge(
 	return 0;
 }
 
-/*
- * Release the group or project dquot pointers the user dquots maybe carrying
- * around as a hint, and proceed to purge the user dquot cache if requested.
- */
-STATIC int
-xfs_qm_dqpurge_hints(
-	struct xfs_dquot	*dqp,
-	void			*data)
-{
-	struct xfs_dquot	*gdqp = NULL;
-	struct xfs_dquot	*pdqp = NULL;
-	uint			flags = *((uint *)data);
-
-	xfs_dqlock(dqp);
-	if (dqp->dq_flags & XFS_DQ_FREEING) {
-		xfs_dqunlock(dqp);
-		return EAGAIN;
-	}
-
-	/* If this quota has a hint attached, prepare for releasing it now */
-	gdqp = dqp->q_gdquot;
-	if (gdqp)
-		dqp->q_gdquot = NULL;
-
-	pdqp = dqp->q_pdquot;
-	if (pdqp)
-		dqp->q_pdquot = NULL;
-
-	xfs_dqunlock(dqp);
-
-	if (gdqp)
-		xfs_qm_dqrele(gdqp);
-	if (pdqp)
-		xfs_qm_dqrele(pdqp);
-
-	if (flags & XFS_QMOPT_UQUOTA)
-		return xfs_qm_dqpurge(dqp, NULL);
-
-	return 0;
-}
-
 /*
  * Purge the dquot cache.
  */
@@ -241,18 +200,8 @@ xfs_qm_dqpurge_all(
 	struct xfs_mount	*mp,
 	uint			flags)
 {
-	/*
-	 * We have to release group/project dquot hint(s) from the user dquot
-	 * at first if they are there, otherwise we would run into an infinite
-	 * loop while walking through radix tree to purge other type of dquots
-	 * since their refcount is not zero if the user dquot refers to them
-	 * as hint.
-	 *
-	 * Call the special xfs_qm_dqpurge_hints() will end up go through the
-	 * general xfs_qm_dqpurge() against user dquot cache if requested.
-	 */
-	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
-
+	if (flags & XFS_QMOPT_UQUOTA)
+		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
 	if (flags & XFS_QMOPT_GQUOTA)
 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
 	if (flags & XFS_QMOPT_PQUOTA)
@@ -409,7 +358,6 @@ xfs_qm_dqattach_one(
 	xfs_dqid_t	id,
 	uint		type,
 	uint		doalloc,
-	xfs_dquot_t	*udqhint, /* hint */
 	xfs_dquot_t	**IO_idqpp)
 {
 	xfs_dquot_t	*dqp;
@@ -419,9 +367,9 @@ xfs_qm_dqattach_one(
 	error = 0;
 
 	/*
-	 * See if we already have it in the inode itself. IO_idqpp is
-	 * &i_udquot or &i_gdquot. This made the code look weird, but
-	 * made the logic a lot simpler.
+	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
+	 * or &i_gdquot. This made the code look weird, but made the logic a lot
+	 * simpler.
 	 */
 	dqp = *IO_idqpp;
 	if (dqp) {
@@ -430,49 +378,10 @@ xfs_qm_dqattach_one(
 	}
 
 	/*
-	 * udqhint is the i_udquot field in inode, and is non-NULL only
-	 * when the type arg is group/project. Its purpose is to save a
-	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
-	 * the user dquot.
-	 */
-	if (udqhint) {
-		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
-		xfs_dqlock(udqhint);
-
-		/*
-		 * No need to take dqlock to look at the id.
-		 *
-		 * The ID can't change until it gets reclaimed, and it won't
-		 * be reclaimed as long as we have a ref from inode and we
-		 * hold the ilock.
-		 */
-		if (type == XFS_DQ_GROUP)
-			dqp = udqhint->q_gdquot;
-		else
-			dqp = udqhint->q_pdquot;
-		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
-			ASSERT(*IO_idqpp == NULL);
-
-			*IO_idqpp = xfs_qm_dqhold(dqp);
-			xfs_dqunlock(udqhint);
-			return 0;
-		}
-
-		/*
-		 * We can't hold a dquot lock when we call the dqget code.
-		 * We'll deadlock in no time, because of (not conforming to)
-		 * lock ordering - the inodelock comes before any dquot lock,
-		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
-		 */
-		xfs_dqunlock(udqhint);
-	}
-
-	/*
-	 * Find the dquot from somewhere. This bumps the
-	 * reference count of dquot and returns it locked.
-	 * This can return ENOENT if dquot didn't exist on
-	 * disk and we didn't ask it to allocate;
-	 * ESRCH if quotas got turned off suddenly.
+	 * Find the dquot from somewhere. This bumps the reference count of
+	 * dquot and returns it locked. This can return ENOENT if dquot didn't
+	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
+	 * turned off suddenly.
 	 */
 	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
 			     doalloc | XFS_QMOPT_DOWARN, &dqp);
@@ -490,48 +399,6 @@ xfs_qm_dqattach_one(
 	return 0;
 }
 
-/*
- * Given a udquot and group/project type, attach the group/project
- * dquot pointer to the udquot as a hint for future lookups.
- */
-STATIC void
-xfs_qm_dqattach_hint(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	struct xfs_dquot	**dqhintp;
-	struct xfs_dquot	*dqp;
-	struct xfs_dquot	*udq = ip->i_udquot;
-
-	ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
-
-	xfs_dqlock(udq);
-
-	if (type == XFS_DQ_GROUP) {
-		dqp = ip->i_gdquot;
-		dqhintp = &udq->q_gdquot;
-	} else {
-		dqp = ip->i_pdquot;
-		dqhintp = &udq->q_pdquot;
-	}
-
-	if (*dqhintp) {
-		struct xfs_dquot	*tmp;
-
-		if (*dqhintp == dqp)
-			goto done;
-
-		tmp = *dqhintp;
-		*dqhintp = NULL;
-		xfs_qm_dqrele(tmp);
-	}
-
-	*dqhintp = xfs_qm_dqhold(dqp);
-done:
-	xfs_dqunlock(udq);
-}
-
 static bool
 xfs_qm_need_dqattach(
 	struct xfs_inode	*ip)
@@ -562,7 +429,6 @@ xfs_qm_dqattach_locked(
 	uint		flags)
 {
 	xfs_mount_t	*mp = ip->i_mount;
-	uint		nquotas = 0;
 	int		error = 0;
 
 	if (!xfs_qm_need_dqattach(ip))
@@ -570,77 +436,39 @@
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-	if (XFS_IS_UQUOTA_ON(mp)) {
+	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
 						flags & XFS_QMOPT_DQALLOC,
-						NULL, &ip->i_udquot);
+						&ip->i_udquot);
 		if (error)
 			goto done;
-		nquotas++;
+		ASSERT(ip->i_udquot);
 	}
 
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-	if (XFS_IS_GQUOTA_ON(mp)) {
+	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
 		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
 						flags & XFS_QMOPT_DQALLOC,
-						ip->i_udquot, &ip->i_gdquot);
-		/*
-		 * Don't worry about the udquot that we may have
-		 * attached above. It'll get detached, if not already.
-		 */
+						&ip->i_gdquot);
 		if (error)
 			goto done;
-		nquotas++;
+		ASSERT(ip->i_gdquot);
 	}
 
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-	if (XFS_IS_PQUOTA_ON(mp)) {
+	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
 		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
 						flags & XFS_QMOPT_DQALLOC,
-						ip->i_udquot, &ip->i_pdquot);
-		/*
-		 * Don't worry about the udquot that we may have
-		 * attached above. It'll get detached, if not already.
-		 */
+						&ip->i_pdquot);
 		if (error)
 			goto done;
-		nquotas++;
+		ASSERT(ip->i_pdquot);
 	}
 
+done:
 	/*
-	 * Attach this group/project quota to the user quota as a hint.
-	 * This WON'T, in general, result in a thrash.
+	 * Don't worry about the dquots that we may have attached before any
+	 * error - they'll get detached later if it has not already been done.
 	 */
-	if (nquotas > 1 && ip->i_udquot) {
-		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-		ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
-		ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
-
-		/*
-		 * We do not have i_udquot locked at this point, but this check
-		 * is OK since we don't depend on the i_gdquot to be accurate
-		 * 100% all the time. It is just a hint, and this will
-		 * succeed in general.
-		 */
-		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
-			xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
-
-		if (ip->i_udquot->q_pdquot != ip->i_pdquot)
-			xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
-	}
-
-done:
-#ifdef DEBUG
-	if (!error) {
-		if (XFS_IS_UQUOTA_ON(mp))
-			ASSERT(ip->i_udquot);
-		if (XFS_IS_GQUOTA_ON(mp))
-			ASSERT(ip->i_gdquot);
-		if (XFS_IS_PQUOTA_ON(mp))
-			ASSERT(ip->i_pdquot);
-	}
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-#endif
 	return error;
 }
...
@@ -278,9 +278,10 @@ xfs_qm_scall_trunc_qfiles(
 	xfs_mount_t	*mp,
 	uint		flags)
 {
-	int		error;
+	int		error = EINVAL;
 
-	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
+	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
+	    (flags & ~XFS_DQ_ALLTYPES)) {
 		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
 			  __func__, flags, mp->m_qflags);
 		return XFS_ERROR(EINVAL);
...
@@ -100,15 +100,35 @@ xfs_fs_set_xstate(
 		if (!XFS_IS_QUOTA_ON(mp))
 			return -EINVAL;
 		return -xfs_qm_scall_quotaoff(mp, flags);
-	case Q_XQUOTARM:
-		if (XFS_IS_QUOTA_ON(mp))
-			return -EINVAL;
-		return -xfs_qm_scall_trunc_qfiles(mp, flags);
 	}
 
 	return -EINVAL;
 }
 
+STATIC int
+xfs_fs_rm_xquota(
+	struct super_block	*sb,
+	unsigned int		uflags)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+	unsigned int		flags = 0;
+
+	if (sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	if (XFS_IS_QUOTA_ON(mp))
+		return -EINVAL;
+
+	if (uflags & FS_USER_QUOTA)
+		flags |= XFS_DQ_USER;
+	if (uflags & FS_GROUP_QUOTA)
+		flags |= XFS_DQ_GROUP;
+	if (uflags & FS_PROJ_QUOTA)
+		flags |= XFS_DQ_PROJ;
+
+	return -xfs_qm_scall_trunc_qfiles(mp, flags);
+}
+
 STATIC int
 xfs_fs_get_dqblk(
 	struct super_block	*sb,
@@ -149,6 +169,7 @@ const struct quotactl_ops xfs_quotactl_operations = {
 	.get_xstatev		= xfs_fs_get_xstatev,
 	.get_xstate		= xfs_fs_get_xstate,
 	.set_xstate		= xfs_fs_set_xstate,
+	.rm_xquota		= xfs_fs_rm_xquota,
 	.get_dqblk		= xfs_fs_get_dqblk,
 	.set_dqblk		= xfs_fs_set_dqblk,
 };
...
@@ -329,6 +329,7 @@ struct quotactl_ops {
 	int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
 	int (*set_xstate)(struct super_block *, unsigned int, int);
 	int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
+	int (*rm_xquota)(struct super_block *, unsigned int);
 };
 
 struct quota_format_type {
...