Commit 239dab46 authored by Linus Torvalds

Merge tag 'for-linus-v3.11-rc1-2' of git://oss.sgi.com/xfs/xfs

Pull more xfs updates from Ben Myers:
 "Here are a fix for xfs_fsr, a cleanup in bulkstat, a cleanup in
  xfs_open_by_handle, updated mount options documentation, a cleanup in
  xfs_bmapi_write, a fix for the size of dquot log reservations, a fix
  for sgid inheritance when acls are in use, a fix for cleaning up
  quotainfo structures, and some more of the work which allows group and
  project quotas to be used together.

  We had a few more in this last quota category that we might have liked
  to get in, but it looks like there are still a few items that need to be
  addressed.

   - fix for xfs_fsr returning -EINVAL
   - cleanup in xfs_bulkstat
   - cleanup in xfs_open_by_handle
   - update mount options documentation
   - clean up local format handling in xfs_bmapi_write
   - fix dquot log reservations which were too small
   - fix sgid inheritance for subdirectories when default acls are in use
   - add project quota fields to various structures
   - fix teardown of quotainfo structures when quotas are turned off"

* tag 'for-linus-v3.11-rc1-2' of git://oss.sgi.com/xfs/xfs:
  xfs: Fix the logic check for all quotas being turned off
  xfs: Add pquota fields where gquota is used.
  xfs: fix sgid inheritance for subdirectories inheriting default acls [V3]
  xfs: dquot log reservations are too small
  xfs: remove local fork format handling from xfs_bmapi_write()
  xfs: update mount options documentation
  xfs: use get_unused_fd_flags(0) instead of get_unused_fd()
  xfs: clean up unused codes at xfs_bulkstat()
  xfs: use XFS_BMAP_BMDR_SPACE vs. XFS_BROOT_SIZE_ADJ
parents f1c41088 c31ad439
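
Editorial note: the common thread in the quota patches merged here is that project quotas stop sharing state with group quotas. The inode, the quotainfo structure and the transaction code each gain a third, project-specific field (i_pdquot, qi_pquota_tree/qi_pquotaip, XFS_QM_TRANS_PRJ) alongside the existing user and group ones. As a rough illustration of the resulting per-type dispatch (a standalone userspace sketch with made-up names, not the kernel code), each quota type now resolves to its own pointer, the way xfs_inode_dquot() does in the diff below:

#include <stddef.h>

/* toy stand-ins for the kernel's dquot and inode structures */
struct dquot { int id; };
enum dq_type { DQ_USER, DQ_GROUP, DQ_PROJ };

struct inode_quota {
	struct dquot *udquot;	/* user dquot */
	struct dquot *gdquot;	/* group dquot */
	struct dquot *pdquot;	/* project dquot (new in this series) */
};

/*
 * Before this series, DQ_PROJ fell through to the group pointer; now each
 * quota type resolves to its own field.
 */
static struct dquot *inode_dquot(struct inode_quota *iq, enum dq_type type)
{
	switch (type) {
	case DQ_USER:	return iq->udquot;
	case DQ_GROUP:	return iq->gdquot;
	case DQ_PROJ:	return iq->pdquot;
	default:	return NULL;
	}
}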
......@@ -690,6 +690,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
sf = (xfs_attr_shortform_t *)tmpbuffer;
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);
bp = NULL;
error = xfs_da_grow_inode(args, &blkno);
if (error) {
......
......@@ -1161,6 +1161,24 @@ xfs_bmap_extents_to_btree(
* since the file data needs to get logged so things will stay consistent.
* (The bmap-level manipulations are ok, though).
*/
void
xfs_bmap_local_to_extents_empty(
struct xfs_inode *ip,
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
ASSERT(ifp->if_bytes == 0);
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
ifp->if_flags &= ~XFS_IFINLINE;
ifp->if_flags |= XFS_IFEXTENTS;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
STATIC int /* error */
xfs_bmap_local_to_extents(
xfs_trans_t *tp, /* transaction pointer */
......@@ -1174,9 +1192,12 @@ xfs_bmap_local_to_extents(
struct xfs_inode *ip,
struct xfs_ifork *ifp))
{
int error; /* error return value */
int error = 0;
int flags; /* logging flags returned */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_alloc_arg_t args; /* allocation arguments */
xfs_buf_t *bp; /* buffer for extent block */
xfs_bmbt_rec_host_t *ep; /* extent record pointer */
/*
* We don't want to deal with the case of keeping inode data inline yet.
......@@ -1185,68 +1206,65 @@ xfs_bmap_local_to_extents(
ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
if (!ifp->if_bytes) {
xfs_bmap_local_to_extents_empty(ip, whichfork);
flags = XFS_ILOG_CORE;
goto done;
}
flags = 0;
error = 0;
if (ifp->if_bytes) {
xfs_alloc_arg_t args; /* allocation arguments */
xfs_buf_t *bp; /* buffer for extent block */
xfs_bmbt_rec_host_t *ep;/* extent record pointer */
ASSERT((ifp->if_flags &
(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
args.firstblock = *firstblock;
/*
* Allocate a block. We know we need only one, since the
* file currently fits in an inode.
*/
if (*firstblock == NULLFSBLOCK) {
args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
args.fsbno = *firstblock;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
}
args.total = total;
args.minlen = args.maxlen = args.prod = 1;
error = xfs_alloc_vextent(&args);
if (error)
goto done;
/* Can't fail, the space was reserved. */
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(args.len == 1);
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
/* initialise the block and copy the data */
init_fn(tp, bp, ip, ifp);
/* account for the change in fork size and log everything */
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
xfs_iext_add(ifp, 0, 1);
ep = xfs_iext_get_ext(ifp, 0);
xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
trace_xfs_bmap_post_update(ip, 0,
whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
_THIS_IP_);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
xfs_trans_mod_dquot_byino(tp, ip,
XFS_TRANS_DQ_BCOUNT, 1L);
flags |= xfs_ilog_fext(whichfork);
ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
XFS_IFINLINE);
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
args.firstblock = *firstblock;
/*
* Allocate a block. We know we need only one, since the
* file currently fits in an inode.
*/
if (*firstblock == NULLFSBLOCK) {
args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
args.fsbno = *firstblock;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
}
ifp->if_flags &= ~XFS_IFINLINE;
ifp->if_flags |= XFS_IFEXTENTS;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
args.total = total;
args.minlen = args.maxlen = args.prod = 1;
error = xfs_alloc_vextent(&args);
if (error)
goto done;
/* Can't fail, the space was reserved. */
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(args.len == 1);
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
/* initialise the block and copy the data */
init_fn(tp, bp, ip, ifp);
/* account for the change in fork size and log everything */
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
xfs_bmap_local_to_extents_empty(ip, whichfork);
flags |= XFS_ILOG_CORE;
xfs_iext_add(ifp, 0, 1);
ep = xfs_iext_get_ext(ifp, 0);
xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
trace_xfs_bmap_post_update(ip, 0,
whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
_THIS_IP_);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
xfs_trans_mod_dquot_byino(tp, ip,
XFS_TRANS_DQ_BCOUNT, 1L);
flags |= xfs_ilog_fext(whichfork);
done:
*logflagsp = flags;
return error;
......@@ -1322,25 +1340,6 @@ xfs_bmap_add_attrfork_extents(
return error;
}
/*
* Block initialisation function for local to extent format conversion.
*
* This shouldn't actually be called by anyone, so make sure debug kernels cause
* a noticable failure.
*/
STATIC void
xfs_bmap_local_to_extents_init_fn(
struct xfs_trans *tp,
struct xfs_buf *bp,
struct xfs_inode *ip,
struct xfs_ifork *ifp)
{
ASSERT(0);
bp->b_ops = &xfs_bmbt_buf_ops;
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
}
/*
* Called from xfs_bmap_add_attrfork to handle local format files. Each
* different data fork content type needs a different callout to do the
......@@ -1381,9 +1380,9 @@ xfs_bmap_add_attrfork_local(
flags, XFS_DATA_FORK,
xfs_symlink_local_to_remote);
return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
XFS_DATA_FORK,
xfs_bmap_local_to_extents_init_fn);
/* should only be called for types that support local format data */
ASSERT(0);
return EFSCORRUPTED;
}
/*
......@@ -4907,20 +4906,19 @@ xfs_bmapi_write(
orig_mval = mval;
orig_nmap = *nmap;
#endif
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
ASSERT(*nmap >= 1);
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL);
ASSERT(len > 0);
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
if (unlikely(XFS_TEST_ERROR(
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
return XFS_ERROR(EFSCORRUPTED);
......@@ -4933,37 +4931,6 @@ xfs_bmapi_write(
XFS_STATS_INC(xs_blk_mapw);
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
/*
* XXX (dgc): This assumes we are only called for inodes that
* contain content neutral data in local format. Anything that
* contains caller-specific data in local format that needs
* transformation to move to a block format needs to do the
* conversion to extent format itself.
*
* Directory data forks and attribute forks handle this
* themselves, but with the addition of metadata verifiers every
* data fork in local format now contains caller specific data
* and as such conversion through this function is likely to be
* broken.
*
* The only likely user of this branch is for remote symlinks,
* but we cannot overwrite the data fork contents of the symlink
* (EEXIST occurs higher up the stack) and so it will never go
* from local format to extent format here. Hence I don't think
* this branch is ever executed intentionally and we should
* consider removing it and asserting that xfs_bmapi_write()
* cannot be called directly on local format forks. i.e. callers
* are completely responsible for local to extent format
* conversion, not xfs_bmapi_write().
*/
error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
&bma.logflags, whichfork,
xfs_bmap_local_to_extents_init_fn);
if (error)
goto error0;
}
if (*firstblock == NULLFSBLOCK) {
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
......
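
Editorial note: the xfs_bmap.c changes above move local-to-extents conversion out of xfs_bmapi_write(), which now asserts it is never called on a local-format fork. Callers that own local-format data, such as xfs_attr_shortform_to_leaf() and xfs_dir2_sf_to_block(), copy the inline bytes aside, shrink the fork with xfs_idata_realloc() and then call the new xfs_bmap_local_to_extents_empty() helper before allocating blocks. The toy model below (invented types and names, plain userspace C rather than kernel code) sketches what "emptying" a local fork amounts to: detach the inline data, then flip the fork format to extents once no bytes remain.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

enum fork_fmt { FMT_LOCAL, FMT_EXTENTS };

struct fork {
	enum fork_fmt	fmt;
	char		*inline_data;	/* only valid while fmt == FMT_LOCAL */
	size_t		bytes;		/* size of the inline data */
};

/* caller takes a private copy of the inline data and drops it from the fork */
static char *detach_inline(struct fork *f)
{
	char *copy = NULL;

	if (f->bytes) {
		copy = malloc(f->bytes);
		assert(copy);
		memcpy(copy, f->inline_data, f->bytes);
	}
	free(f->inline_data);
	f->inline_data = NULL;
	f->bytes = 0;			/* mirrors xfs_idata_realloc(ip, -if_bytes, fork) */
	return copy;
}

/* only legal once the inline bytes are gone, as the kernel helper asserts */
static void local_to_extents_empty(struct fork *f)
{
	assert(f->fmt == FMT_LOCAL);
	assert(f->bytes == 0);
	f->fmt = FMT_EXTENTS;
}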
......@@ -172,6 +172,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
#endif
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
struct xfs_bmap_free *flist, struct xfs_mount *mp);
void xfs_bmap_cancel(struct xfs_bmap_free *flist);
......
......@@ -132,9 +132,6 @@ typedef enum xfs_dinode_fmt {
#define XFS_LITINO(mp, version) \
((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
#define XFS_BROOT_SIZE_ADJ(ip) \
(XFS_BMBT_BLOCK_LEN((ip)->i_mount) - sizeof(xfs_bmdr_block_t))
/*
* Inode data & attribute fork sizes, per inode.
*/
......
......@@ -29,6 +29,7 @@
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_buf_item.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
......@@ -1164,13 +1165,15 @@ xfs_dir2_sf_to_block(
__be16 *tagp; /* end of data entry */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_name name;
struct xfs_ifork *ifp;
trace_xfs_dir2_sf_to_block(args);
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
ASSERT(ifp->if_flags & XFS_IFINLINE);
/*
* Bomb out if the shortform directory is way too short.
*/
......@@ -1179,22 +1182,23 @@ xfs_dir2_sf_to_block(
return XFS_ERROR(EIO);
}
oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
ASSERT(ifp->if_bytes == dp->i_d.di_size);
ASSERT(ifp->if_u1.if_data != NULL);
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
ASSERT(dp->i_d.di_nextents == 0);
/*
* Copy the directory into a temporary buffer.
* Then pitch the incore inode data so we can make extents.
*/
sfp = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
memcpy(sfp, oldsfp, dp->i_df.if_bytes);
sfp = kmem_alloc(ifp->if_bytes, KM_SLEEP);
memcpy(sfp, oldsfp, ifp->if_bytes);
xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
xfs_bmap_local_to_extents_empty(dp, XFS_DATA_FORK);
dp->i_d.di_size = 0;
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
/*
* Add block 0 to the inode.
......
......@@ -936,6 +936,7 @@ xfs_qm_dqput_final(
{
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
struct xfs_dquot *gdqp;
struct xfs_dquot *pdqp;
trace_xfs_dqput_free(dqp);
......@@ -949,21 +950,29 @@ xfs_qm_dqput_final(
/*
* If we just added a udquot to the freelist, then we want to release
* the gdquot reference that it (probably) has. Otherwise it'll keep
* the gdquot from getting reclaimed.
* the gdquot/pdquot reference that it (probably) has. Otherwise it'll
* keep the gdquot/pdquot from getting reclaimed.
*/
gdqp = dqp->q_gdquot;
if (gdqp) {
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
pdqp = dqp->q_pdquot;
if (pdqp) {
xfs_dqlock(pdqp);
dqp->q_pdquot = NULL;
}
xfs_dqunlock(dqp);
/*
* If we had a group quota hint, release it now.
* If we had a group/project quota hint, release it now.
*/
if (gdqp)
xfs_qm_dqput(gdqp);
if (pdqp)
xfs_qm_dqput(pdqp);
}
/*
......
......@@ -53,6 +53,7 @@ typedef struct xfs_dquot {
xfs_fileoff_t q_fileoffset; /* offset in quotas file */
struct xfs_dquot*q_gdquot; /* group dquot, hint only */
struct xfs_dquot*q_pdquot; /* project dquot, hint only */
xfs_disk_dquot_t q_core; /* actual usage & quotas */
xfs_dq_logitem_t q_logitem; /* dquot log item */
xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
......@@ -118,8 +119,9 @@ static inline int xfs_this_quota_on(struct xfs_mount *mp, int type)
case XFS_DQ_USER:
return XFS_IS_UQUOTA_ON(mp);
case XFS_DQ_GROUP:
return XFS_IS_GQUOTA_ON(mp);
case XFS_DQ_PROJ:
return XFS_IS_OQUOTA_ON(mp);
return XFS_IS_PQUOTA_ON(mp);
default:
return 0;
}
......@@ -131,8 +133,9 @@ static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type)
case XFS_DQ_USER:
return ip->i_udquot;
case XFS_DQ_GROUP:
case XFS_DQ_PROJ:
return ip->i_gdquot;
case XFS_DQ_PROJ:
return ip->i_pdquot;
default:
return NULL;
}
......
......@@ -337,6 +337,7 @@ xfs_iget_cache_miss(
iflags |= XFS_IDONTCACHE;
ip->i_udquot = NULL;
ip->i_gdquot = NULL;
ip->i_pdquot = NULL;
xfs_iflags_set(ip, iflags);
/* insert the new inode */
......
......@@ -2156,8 +2156,8 @@ xfs_iroot_realloc(
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
(int)new_size);
ifp->if_broot_bytes = (int)new_size;
ASSERT(ifp->if_broot_bytes <=
XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip));
ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
XFS_IFORK_SIZE(ip, whichfork));
memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
return;
}
......@@ -2210,8 +2210,9 @@ xfs_iroot_realloc(
kmem_free(ifp->if_broot);
ifp->if_broot = new_broot;
ifp->if_broot_bytes = (int)new_size;
ASSERT(ifp->if_broot_bytes <=
XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ(ip));
if (ifp->if_broot)
ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
XFS_IFORK_SIZE(ip, whichfork));
return;
}
......@@ -2522,9 +2523,8 @@ xfs_iflush_fork(
if ((iip->ili_fields & brootflag[whichfork]) &&
(ifp->if_broot_bytes > 0)) {
ASSERT(ifp->if_broot != NULL);
ASSERT(ifp->if_broot_bytes <=
(XFS_IFORK_SIZE(ip, whichfork) +
XFS_BROOT_SIZE_ADJ(ip)));
ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
XFS_IFORK_SIZE(ip, whichfork));
xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
(xfs_bmdr_block_t *)cp,
XFS_DFORK_SIZE(dip, mp, whichfork));
......
......@@ -250,6 +250,7 @@ typedef struct xfs_inode {
struct xfs_mount *i_mount; /* fs mount struct ptr */
struct xfs_dquot *i_udquot; /* user dquot */
struct xfs_dquot *i_gdquot; /* group dquot */
struct xfs_dquot *i_pdquot; /* project dquot */
/* Inode location stuff */
xfs_ino_t i_ino; /* inode number (agno/agino)*/
......
......@@ -248,7 +248,7 @@ xfs_open_by_handle(
goto out_dput;
}
fd = get_unused_fd();
fd = get_unused_fd_flags(0);
if (fd < 0) {
error = fd;
goto out_dput;
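
Editorial note: one small cleanup in the pull is the switch from get_unused_fd() to get_unused_fd_flags(0) in xfs_open_by_handle(), visible in the hunk above. The usual kernel idiom around these calls looks roughly like the fragment below (a generic sketch, not the surrounding XFS code): reserve a descriptor number, bail out on a negative return, and only publish it with fd_install() once the struct file is fully set up; put_unused_fd() would undo the reservation if setup failed in between.

#include <linux/file.h>	/* get_unused_fd_flags(), fd_install() */
#include <linux/fs.h>

static int publish_file(struct file *filp)
{
	int fd = get_unused_fd_flags(0);	/* flags such as O_CLOEXEC could go here */

	if (fd < 0)
		return fd;			/* e.g. -EMFILE */

	fd_install(fd, filp);			/* descriptor becomes visible to userspace */
	return fd;
}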
......@@ -928,7 +928,7 @@ xfs_ioctl_setattr(
struct xfs_trans *tp;
unsigned int lock_flags = 0;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
int code;
......@@ -957,7 +957,7 @@ xfs_ioctl_setattr(
if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
ip->i_d.di_gid, fa->fsx_projid,
XFS_QMOPT_PQUOTA, &udqp, &gdqp);
XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
if (code)
return code;
}
......@@ -994,8 +994,8 @@ xfs_ioctl_setattr(
XFS_IS_PQUOTA_ON(mp) &&
xfs_get_projid(ip) != fa->fsx_projid) {
ASSERT(tp);
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ?
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL,
pdqp, capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
goto error_return;
......@@ -1113,7 +1113,7 @@ xfs_ioctl_setattr(
if (xfs_get_projid(ip) != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot, gdqp);
&ip->i_pdquot, pdqp);
}
xfs_set_projid(ip, fa->fsx_projid);
......@@ -1160,13 +1160,13 @@ xfs_ioctl_setattr(
*/
xfs_qm_dqrele(olddquot);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
return code;
error_return:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
xfs_trans_cancel(tp, 0);
if (lock_flags)
xfs_iunlock(ip, lock_flags);
......
......@@ -467,9 +467,6 @@ xfs_setattr_mode(
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
ip->i_d.di_mode &= S_IFMT;
ip->i_d.di_mode |= mode & ~S_IFMT;
......@@ -495,15 +492,18 @@ xfs_setattr_nonsize(
trace_xfs_setattr(ip);
if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
/* If acls are being inherited, we already have this checked */
if (!(flags & XFS_ATTR_NOACL)) {
if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
error = -inode_change_ok(inode, iattr);
if (error)
return XFS_ERROR(error);
error = -inode_change_ok(inode, iattr);
if (error)
return XFS_ERROR(error);
}
ASSERT((mask & ATTR_SIZE) == 0);
......@@ -539,7 +539,7 @@ xfs_setattr_nonsize(
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
qflags, &udqp, &gdqp);
qflags, &udqp, &gdqp, NULL);
if (error)
return error;
}
......@@ -575,7 +575,7 @@ xfs_setattr_nonsize(
(XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
ASSERT(tp);
error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ?
NULL, capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (error) /* out of quota */
goto out_trans_cancel;
......
......@@ -221,7 +221,6 @@ xfs_bulkstat(
char __user *ubufp; /* pointer into user's buffer */
int ubelem; /* spaces used in user's buffer */
int ubused; /* bytes used by formatter */
xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */
/*
* Get the last inode value, see if there's nothing to do.
......@@ -263,7 +262,6 @@ xfs_bulkstat(
rval = 0;
while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
cond_resched();
bp = NULL;
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
if (error) {
/*
......@@ -436,27 +434,7 @@ xfs_bulkstat(
irbp->ir_freecount < XFS_INODES_PER_CHUNK;
chunkidx++, clustidx++, agino++) {
ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
/*
* Recompute agbno if this is the
* first inode of the cluster.
*
* Careful with clustidx. There can be
* multiple clusters per chunk, a single
* cluster per chunk or a cluster that has
* inodes represented from several different
* chunks (if blocksize is large).
*
* Because of this, the starting clustidx is
* initialized to zero in this loop but must
* later be reset after reading in the cluster
* buffer.
*/
if ((chunkidx & (nicluster - 1)) == 0) {
agbno = XFS_AGINO_TO_AGBNO(mp,
irbp->ir_startino) +
((chunkidx & nimask) >>
mp->m_sb.sb_inopblog);
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
/*
* Skip if this inode is free.
......@@ -502,10 +480,6 @@ xfs_bulkstat(
cond_resched();
}
if (bp)
xfs_buf_relse(bp);
/*
* Set up for the next loop iteration.
*/
......
......@@ -44,9 +44,11 @@ extern struct kmem_zone *xfs_qm_dqtrxzone;
typedef struct xfs_quotainfo {
struct radix_tree_root qi_uquota_tree;
struct radix_tree_root qi_gquota_tree;
struct radix_tree_root qi_pquota_tree;
struct mutex qi_tree_lock;
xfs_inode_t *qi_uquotaip; /* user quota inode */
xfs_inode_t *qi_gquotaip; /* group quota inode */
struct xfs_inode *qi_uquotaip; /* user quota inode */
struct xfs_inode *qi_gquotaip; /* group quota inode */
struct xfs_inode *qi_pquotaip; /* project quota inode */
struct list_head qi_lru_list;
struct mutex qi_lru_lock;
int qi_lru_count;
......@@ -78,8 +80,9 @@ xfs_dquot_tree(
case XFS_DQ_USER:
return &qi->qi_uquota_tree;
case XFS_DQ_GROUP:
case XFS_DQ_PROJ:
return &qi->qi_gquota_tree;
case XFS_DQ_PROJ:
return &qi->qi_pquota_tree;
default:
ASSERT(0);
}
......@@ -93,8 +96,9 @@ xfs_dq_to_quota_inode(struct xfs_dquot *dqp)
case XFS_DQ_USER:
return dqp->q_mount->m_quotainfo->qi_uquotaip;
case XFS_DQ_GROUP:
case XFS_DQ_PROJ:
return dqp->q_mount->m_quotainfo->qi_gquotaip;
case XFS_DQ_PROJ:
return dqp->q_mount->m_quotainfo->qi_pquotaip;
default:
ASSERT(0);
}
......@@ -107,18 +111,20 @@ extern void xfs_trans_mod_dquot(struct xfs_trans *,
struct xfs_dquot *, uint, long);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
struct xfs_dquot *, long, long, uint);
struct xfs_dquot *, struct xfs_dquot *,
long, long, uint);
extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *);
extern void xfs_trans_log_dquot(struct xfs_trans *, struct xfs_dquot *);
/*
* We keep the usr and grp dquots separately so that locking will be easier
* to do at commit time. All transactions that we know of at this point
* We keep the usr, grp, and prj dquots separately so that locking will be
* easier to do at commit time. All transactions that we know of at this point
* affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value.
*/
enum {
XFS_QM_TRANS_USR = 0,
XFS_QM_TRANS_GRP,
XFS_QM_TRANS_PRJ,
XFS_QM_TRANS_DQTYPES
};
#define XFS_QM_TRANS_MAXDQS 2
......
......@@ -112,16 +112,16 @@ xfs_qm_newmount(
if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
(!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
(pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
(!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) ||
(gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
(!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
(!gquotaondisk && XFS_IS_GQUOTA_ON(mp)) ||
(pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
(!pquotaondisk && XFS_IS_PQUOTA_ON(mp))) &&
xfs_dev_is_read_only(mp, "changing quota state")) {
xfs_warn(mp, "please mount with%s%s%s%s.",
(!quotaondisk ? "out quota" : ""),
(uquotaondisk ? " usrquota" : ""),
(pquotaondisk ? " prjquota" : ""),
(gquotaondisk ? " grpquota" : ""));
(gquotaondisk ? " grpquota" : ""),
(pquotaondisk ? " prjquota" : ""));
return XFS_ERROR(EPERM);
}
......
......@@ -119,7 +119,8 @@ xfs_qm_scall_quotaoff(
dqtype |= XFS_QMOPT_GQUOTA;
flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
inactivate_flags |= XFS_GQUOTA_ACTIVE;
} else if (flags & XFS_PQUOTA_ACCT) {
}
if (flags & XFS_PQUOTA_ACCT) {
dqtype |= XFS_QMOPT_PQUOTA;
flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
inactivate_flags |= XFS_PQUOTA_ACTIVE;
......@@ -198,10 +199,9 @@ xfs_qm_scall_quotaoff(
}
/*
* If quotas is completely disabled, close shop.
* If all quotas are completely turned off, close shop.
*/
if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
if (mp->m_qflags == 0) {
mutex_unlock(&q->qi_quotaofflock);
xfs_qm_destroy_quotainfo(mp);
return (0);
......@@ -214,10 +214,14 @@ xfs_qm_scall_quotaoff(
IRELE(q->qi_uquotaip);
q->qi_uquotaip = NULL;
}
if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
IRELE(q->qi_gquotaip);
q->qi_gquotaip = NULL;
}
if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
IRELE(q->qi_pquotaip);
q->qi_pquotaip = NULL;
}
out_unlock:
mutex_unlock(&q->qi_quotaofflock);
......@@ -859,9 +863,11 @@ xfs_dqrele_inode(
{
/* skip quota inodes */
if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL);
ASSERT(ip->i_pdquot == NULL);
return 0;
}
......@@ -870,10 +876,14 @@ xfs_dqrele_inode(
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
xfs_qm_dqrele(ip->i_pdquot);
ip->i_pdquot = NULL;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
}
......
......@@ -108,11 +108,28 @@ typedef struct xfs_dqblk {
{ XFS_DQ_FREEING, "FREEING" }
/*
* In the worst case, when both user and group quotas are on,
* we can have a max of three dquots changing in a single transaction.
* We have the possibility of all three quota types being active at once, and
* hence free space modification requires modification of all three current
* dquots in a single transaction. For this case we need to have a reservation
* of at least 3 dquots.
*
* However, a chmod operation can change both UID and GID in a single
* transaction, resulting in requiring {old, new} x {uid, gid} dquots to be
* modified. Hence for this case we need to reserve space for at least 4 dquots.
*
* And in the worst case, there's a rename operation that can be modifying up to
* 4 inodes with dquots attached to them. In reality, the only inodes that can
* have their dquots modified are the source and destination directory inodes
* due to directory name creation and removal. That can require space allocation
* and/or freeing on both directory inodes, and hence all three dquots on each
* inode can be modified. And if the directories are world writeable, all the
* dquots can be unique and so 6 dquots can be modified....
*
* And, of course, we also need to take into account the dquot log format item
* used to describe each dquot.
*/
#define XFS_DQUOT_LOGRES(mp) (sizeof(xfs_disk_dquot_t) * 3)
#define XFS_DQUOT_LOGRES(mp) \
((sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6)
/*
* These are the structures used to lay out dquots and quotaoff
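
Editorial note: the factor of 6 in the enlarged XFS_DQUOT_LOGRES() above comes straight from the rename worst case described in the comment: two directory inodes, each with up to three distinct dquots (user, group, project) attached, and each logged dquot costing a log-format header plus the on-disk dquot core. A compile-time restatement of that arithmetic, using illustrative names rather than the kernel macro:

/* one logged dquot = log format header + on-disk dquot core */
#define DQ_ITEM_SIZE(hdr_sz, core_sz)	((hdr_sz) + (core_sz))

#define DQ_TYPES_PER_INODE	3	/* user + group + project */
#define DQ_INODES_WORST_CASE	2	/* source and target directory of a rename */

/* worst case: all six dquots are distinct and all get modified */
#define DQ_LOGRES(hdr_sz, core_sz) \
	(DQ_ITEM_SIZE(hdr_sz, core_sz) * DQ_TYPES_PER_INODE * DQ_INODES_WORST_CASE)

With the kernel's structure sizes plugged in, this reduces to the (sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6 expression in the hunk.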
......@@ -271,10 +288,10 @@ typedef struct xfs_qoff_logformat {
* we didn't have the inode locked, the appropriate dquot(s) will be
* attached atomically.
*/
#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\
(ip)->i_udquot == NULL) || \
(XFS_IS_OQUOTA_ON(mp) && \
(ip)->i_gdquot == NULL))
#define XFS_NOT_DQATTACHED(mp, ip) \
((XFS_IS_UQUOTA_ON(mp) && (ip)->i_udquot == NULL) || \
(XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \
(XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL))
#define XFS_QM_NEED_QUOTACHECK(mp) \
((XFS_IS_UQUOTA_ON(mp) && \
......@@ -284,14 +301,6 @@ typedef struct xfs_qoff_logformat {
(XFS_IS_PQUOTA_ON(mp) && \
(mp->m_sb.sb_qflags & XFS_PQUOTA_CHKD) == 0))
#define XFS_MOUNT_QUOTA_SET1 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\
XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD)
#define XFS_MOUNT_QUOTA_SET2 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\
XFS_PQUOTA_ENFD|XFS_PQUOTA_CHKD)
#define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\
XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD|\
......@@ -329,17 +338,18 @@ extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
struct xfs_inode *, long, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
struct xfs_dquot *, long, long, uint);
struct xfs_dquot *, struct xfs_dquot *, long, long, uint);
extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint,
struct xfs_dquot **, struct xfs_dquot **);
struct xfs_dquot **, struct xfs_dquot **, struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *);
struct xfs_dquot *, struct xfs_dquot *, struct xfs_dquot *);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *, uint);
struct xfs_dquot *, struct xfs_dquot *,
struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *, uint);
extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
extern void xfs_qm_dqdetach(struct xfs_inode *);
......@@ -353,10 +363,12 @@ extern void xfs_qm_unmount_quotas(struct xfs_mount *);
#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp)
uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp,
struct xfs_dquot **pdqp)
{
*udqp = NULL;
*gdqp = NULL;
*pdqp = NULL;
return 0;
}
#define xfs_trans_dup_dqinfo(tp, tp2)
......@@ -371,14 +383,15 @@ static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
}
static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
struct xfs_mount *mp, struct xfs_dquot *udqp,
struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
long nblks, long nions, uint flags)
{
return 0;
}
#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
#define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
#define xfs_qm_vop_rename_dqattach(it) (0)
#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
#define xfs_qm_vop_chown_reserve(tp, ip, u, g, fl) (0)
#define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl) (0)
#define xfs_qm_dqattach(ip, fl) (0)
#define xfs_qm_dqattach_locked(ip, fl) (0)
#define xfs_qm_dqdetach(ip)
......@@ -392,8 +405,8 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
#define xfs_trans_reserve_quota(tp, mp, ud, gd, nb, ni, f) \
xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \
#define xfs_trans_reserve_quota(tp, mp, ud, gd, pd, nb, ni, f) \
xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)
extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
......
......@@ -360,6 +360,7 @@ xfs_symlink(
prid_t prid;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
uint resblks;
*ipp = NULL;
......@@ -386,7 +387,7 @@ xfs_symlink(
* Make sure that we have allocated dquot(s) on disk.
*/
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp, &pdqp);
if (error)
goto std_return;
......@@ -427,7 +428,8 @@ xfs_symlink(
/*
* Reserve disk quota : blocks and inode.
*/
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
pdqp, resblks, 1, 0);
if (error)
goto error_return;
......@@ -465,7 +467,7 @@ xfs_symlink(
/*
* Also attach the dquot(s) to it, if applicable.
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
if (resblks)
resblks -= XFS_IALLOC_SPACE_RES(mp);
......@@ -563,6 +565,7 @@ xfs_symlink(
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
*ipp = ip;
return 0;
......@@ -576,6 +579,7 @@ xfs_symlink(
xfs_trans_cancel(tp, cancel_flags);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
......
......@@ -163,8 +163,10 @@ xfs_trans_mod_dquot_byino(
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}
STATIC struct xfs_dqtrx *
......@@ -177,8 +179,12 @@ xfs_trans_get_dqtrx(
if (XFS_QM_ISUDQ(dqp))
qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
else
else if (XFS_QM_ISGDQ(dqp))
qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
else if (XFS_QM_ISPDQ(dqp))
qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
else
return NULL;
for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
if (qa[i].qt_dquot == NULL ||
......@@ -291,11 +297,10 @@ xfs_trans_mod_dquot(
/*
* Given an array of dqtrx structures, lock all the dquots associated
* and join them to the transaction, provided they have been modified.
* We know that the highest number of dquots (of one type - usr OR grp),
* involved in a transaction is 2 and that both usr and grp combined - 3.
* So, we don't attempt to make this very generic.
* Given an array of dqtrx structures, lock all the dquots associated and join
* them to the transaction, provided they have been modified. We know that the
* highest number of dquots of one type - usr, grp OR prj - involved in a
* transaction is 2 so we don't need to make this very generic.
*/
STATIC void
xfs_trans_dqlockedjoin(
......@@ -728,8 +733,8 @@ xfs_trans_dqresv(
/*
* Given dquot(s), make disk block and/or inode reservations against them.
* The fact that this does the reservation against both the usr and
* grp/prj quotas is important, because this follows a both-or-nothing
* The fact that this does the reservation against user, group and
* project quotas is important, because this follows a all-or-nothing
* approach.
*
* flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
......@@ -744,6 +749,7 @@ xfs_trans_reserve_quota_bydquots(
struct xfs_mount *mp,
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp,
struct xfs_dquot *pdqp,
long nblks,
long ninos,
uint flags)
......@@ -771,11 +777,21 @@ xfs_trans_reserve_quota_bydquots(
goto unwind_usr;
}
if (pdqp) {
error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
if (error)
goto unwind_grp;
}
/*
* Didn't change anything critical, so, no need to log
*/
return 0;
unwind_grp:
flags |= XFS_QMOPT_FORCE_RES;
if (gdqp)
xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
flags |= XFS_QMOPT_FORCE_RES;
if (udqp)
......@@ -817,6 +833,7 @@ xfs_trans_reserve_quota_nblks(
*/
return xfs_trans_reserve_quota_bydquots(tp, mp,
ip->i_udquot, ip->i_gdquot,
ip->i_pdquot,
nblks, ninos, flags);
}
......
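
Editorial note: the xfs_trans_dquot.c hunk above extends the all-or-nothing reservation to a third dquot: reserve against user, then group, then project, and on any failure undo whatever was already reserved (re-running the reservation with negative counts and XFS_QMOPT_FORCE_RES so the unwind is not refused). The same unwind idiom, reduced to a standalone sketch with invented reserve/release helpers:

#include <stdbool.h>

/* stand-ins for per-dquot reserve/release; release never fails */
static bool reserve(long *pool, long nblks)
{
	if (*pool < nblks)
		return false;
	*pool -= nblks;
	return true;
}

static void release(long *pool, long nblks)
{
	*pool += nblks;
}

/* reserve against up to three quota pools, or leave all of them untouched */
static bool reserve_all(long *usr, long *grp, long *prj, long nblks)
{
	if (usr && !reserve(usr, nblks))
		return false;
	if (grp && !reserve(grp, nblks))
		goto unwind_usr;
	if (prj && !reserve(prj, nblks))
		goto unwind_grp;
	return true;

unwind_grp:
	if (grp)
		release(grp, nblks);
unwind_usr:
	if (usr)
		release(usr, nblks);
	return false;
}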
......@@ -489,6 +489,7 @@ xfs_create(
prid_t prid;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
uint resblks;
uint log_res;
uint log_count;
......@@ -507,7 +508,8 @@ xfs_create(
* Make sure that we have allocated dquot(s) on disk.
*/
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
return error;
......@@ -559,7 +561,8 @@ xfs_create(
/*
* Reserve disk quota and the inode.
*/
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
pdqp, resblks, 1, 0);
if (error)
goto out_trans_cancel;
......@@ -623,7 +626,7 @@ xfs_create(
* These ids of the inode couldn't have changed since the new
* inode has been locked ever since it was created.
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
......@@ -635,6 +638,7 @@ xfs_create(
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
*ipp = ip;
return 0;
......@@ -656,6 +660,7 @@ xfs_create(
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
......@@ -1568,7 +1573,7 @@ xfs_free_file_space(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota(tp, mp,
ip->i_udquot, ip->i_gdquot,
ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
resblks, 0, XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;
......