Commit 1d32bdaf authored by Linus Torvalds

Merge tag 'xfs-for-linus-v3.14-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs update from Ben Myers:
 "This is primarily bug fixes, many of which you already have.  New
  stuff includes a series to decouple the in-memory and on-disk log
  format, helpers in the area of inode clusters, and i_version handling.

  We decided to try to use more topic branches this release, so there
  are some merge commits in there on account of that.  I'm afraid I
  didn't do a good job of putting meaningful comments in the first
  couple of merges.  Sorry about that.  I think I have the hang of it
  now.

  For 3.14-rc1 there are fixes in the areas of remote attributes,
  discard, growfs, memory leaks in recovery, directory v2, quotas, the
  MAINTAINERS file, allocation alignment, extent list locking, and in
  xfs_bmapi_allocate.  There are cleanups in xfs_setsize_buftarg,
  removing unused macros, quotas, setattr, and freeing of inode
  clusters.  The in-memory and on-disk log format have been decoupled, a
  common helper to calculate the number of blocks in an inode cluster
  has been added, and handling of i_version has been pulled into the
  filesystems that use it.

   - cleanup in xfs_setsize_buftarg
   - removal of remaining unused flags for vop toss/flush/flushinval
   - fix for memory corruption in xfs_attrlist_by_handle
   - fix for out-of-date comment in xfs_trans_dqlockedjoin
   - fix for discard if range length is less than one block
   - fix for overrun of agfl buffer using growfs on v4 superblock
     filesystems
   - pull i_version handling out into the filesystems that use it
   - don't leak recovery items on error
   - fix for memory leak in xfs_dir2_node_removename
   - several cleanups for quotas
   - fix bad assertion in xfs_qm_vop_create_dqattach
   - cleanup for xfs_setattr_mode, and add xfs_setattr_time
   - fix quota assert in xfs_setattr_nonsize
   - fix an infinite loop when turning off group/project quota before
     user quota
   - fix for temporary buffer allocation failure in xfs_dir2_block_to_sf
     with large directory block sizes
   - fix Dave's email address in MAINTAINERS
   - cleanup calculation of freed inode cluster blocks
   - fix alignment of initial file allocations to match filesystem
     geometry
   - decouple in-memory and on-disk log format
   - introduce a common helper to calculate the number of filesystem
     blocks in an inode cluster
   - fixes for extent list locking
   - fix for off-by-one in xfs_attr3_rmt_verify
   - fix for missing destroy_work_on_stack in xfs_bmapi_allocate"

* tag 'xfs-for-linus-v3.14-rc1' of git://oss.sgi.com/xfs/xfs: (51 commits)
  xfs: Calling destroy_work_on_stack() to pair with INIT_WORK_ONSTACK()
  xfs: fix off-by-one error in xfs_attr3_rmt_verify
  xfs: assert that we hold the ilock for extent map access
  xfs: use xfs_ilock_attr_map_shared in xfs_attr_list_int
  xfs: use xfs_ilock_attr_map_shared in xfs_attr_get
  xfs: use xfs_ilock_data_map_shared in xfs_qm_dqiterate
  xfs: use xfs_ilock_data_map_shared in xfs_qm_dqtobp
  xfs: take the ilock around xfs_bmapi_read in xfs_zero_remaining_bytes
  xfs: reinstate the ilock in xfs_readdir
  xfs: add xfs_ilock_attr_map_shared
  xfs: rename xfs_ilock_map_shared
  xfs: remove xfs_iunlock_map_shared
  xfs: no need to lock the inode in xfs_find_handle
  xfs: use xfs_icluster_size_fsb in xfs_imap
  xfs: use xfs_icluster_size_fsb in xfs_ifree_cluster
  xfs: use xfs_icluster_size_fsb in xfs_ialloc_inode_init
  xfs: use xfs_icluster_size_fsb in xfs_bulkstat
  xfs: introduce a common helper xfs_icluster_size_fsb
  xfs: get rid of XFS_IALLOC_BLOCKS macros
  xfs: get rid of XFS_INODE_CLUSTER_SIZE macros
  ...
parents 0dc3fd02 bf3964c1
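
Note on the last shortlog entry: INIT_WORK_ONSTACK() registers an on-stack work item with the kernel's debugobjects machinery, so it must be paired with destroy_work_on_stack() once the work is known to be complete. A minimal sketch of the lifecycle (worker_fn and wq are placeholders, not names from this merge; the actual fix is in xfs_bmapi_allocate):

	struct work_struct	w;

	INIT_WORK_ONSTACK(&w, worker_fn);	/* registers the on-stack object */
	queue_work(wq, &w);			/* or schedule_work(&w) */
	flush_work(&w);				/* wait until worker_fn has run */
	destroy_work_on_stack(&w);		/* pairs with INIT_WORK_ONSTACK */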
@@ -202,11 +202,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
 		return -EPERM;
 	}
 
-	if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
-		if (attr->ia_size != inode->i_size)
-			inode_inc_iversion(inode);
-	}
-
 	if ((ia_valid & ATTR_MODE)) {
 		umode_t amode = attr->ia_mode;
 		/* Flag setting protected by i_mutex */
...
@@ -4354,8 +4354,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 	 * these flags set.  For all other operations the VFS set these flags
 	 * explicitly if it wants a timestamp update.
 	 */
-	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
-		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
+	if (newsize != oldsize) {
+		inode_inc_iversion(inode);
+		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
+			inode->i_ctime = inode->i_mtime =
+				current_fs_time(inode->i_sb);
+	}
 
 	if (newsize > oldsize) {
 		truncate_pagecache(inode, newsize);
...
@@ -4586,6 +4586,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
 				return -EFBIG;
 		}
+
+		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
+			inode_inc_iversion(inode);
+
 		if (S_ISREG(inode->i_mode) &&
 		    (attr->ia_size < inode->i_size)) {
 			if (ext4_should_order_data(inode)) {
...
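
Taken together, the three hunks above are the "pull i_version handling out into the filesystems" item from the summary: notify_change() no longer bumps i_version on size changes, and each filesystem that uses i_version now does it itself in its setattr path. The per-filesystem pattern, distilled from the btrfs and ext4 hunks above:

	/* in the filesystem's setattr path, before applying the size change: */
	if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
		inode_inc_iversion(inode);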
@@ -1217,7 +1217,7 @@ __xfs_get_blocks(
 		lockmode = XFS_ILOCK_EXCL;
 		xfs_ilock(ip, lockmode);
 	} else {
-		lockmode = xfs_ilock_map_shared(ip);
+		lockmode = xfs_ilock_data_map_shared(ip);
 	}
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
...
@@ -164,6 +164,7 @@ xfs_attr_get(
 {
 	int			error;
 	struct xfs_name		xname;
+	uint			lock_mode;
 
 	XFS_STATS_INC(xs_attr_get);
@@ -174,9 +175,9 @@ xfs_attr_get(
 	if (error)
 		return error;
 
-	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	lock_mode = xfs_ilock_attr_map_shared(ip);
 	error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags);
-	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	xfs_iunlock(ip, lock_mode);
 	return(error);
 }
...
@@ -507,17 +507,17 @@ xfs_attr_list_int(
 {
 	int			error;
 	xfs_inode_t		*dp = context->dp;
+	uint			lock_mode;
 
 	XFS_STATS_INC(xs_attr_list);
 
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return EIO;
 
-	xfs_ilock(dp, XFS_ILOCK_SHARED);
-
 	/*
 	 * Decide on what work routines to call based on the inode size.
 	 */
+	lock_mode = xfs_ilock_attr_map_shared(dp);
 	if (!xfs_inode_hasattr(dp)) {
 		error = 0;
 	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
@@ -527,9 +527,7 @@ xfs_attr_list_int(
 	} else {
 		error = xfs_attr_node_list(context);
 	}
-
-	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+	xfs_iunlock(dp, lock_mode);
 
 	return error;
 }
...
@@ -4013,6 +4013,7 @@ xfs_bmapi_read(
 	ASSERT(*nmap >= 1);
 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
 			   XFS_BMAPI_IGSTATE)));
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -4207,6 +4208,7 @@ xfs_bmapi_delay(
 	ASSERT(*nmap >= 1);
 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
 	ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
@@ -4500,6 +4502,7 @@ xfs_bmapi_write(
 	ASSERT(tp != NULL);
 	ASSERT(len > 0);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -5051,6 +5054,7 @@ xfs_bunmapi(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	ASSERT(len > 0);
 	ASSERT(nexts >= 0);
...
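
The four asserts above make the locking contract of the extent-map API explicit: xfs_bmapi_read() may run under either ilock mode, while xfs_bmapi_delay(), xfs_bmapi_write() and xfs_bunmapi() require the exclusive ilock. The read-side caller pattern, as added to xfs_zero_remaining_bytes() later in this merge:

	uint	lock_mode;

	lock_mode = xfs_ilock_data_map_shared(ip);	/* SHARED, or EXCL */
	error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
	xfs_iunlock(ip, lock_mode);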
@@ -618,22 +618,27 @@ xfs_getbmap(
 		return XFS_ERROR(ENOMEM);
 
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
-		if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
+	if (whichfork == XFS_DATA_FORK) {
+		if (!(iflags & BMV_IF_DELALLOC) &&
+		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
 			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
 			if (error)
 				goto out_unlock_iolock;
+
+			/*
+			 * Even after flushing the inode, there can still be
+			 * delalloc blocks on the inode beyond EOF due to
+			 * speculative preallocation. These are not removed
+			 * until the release function is called or the inode
+			 * is inactivated. Hence we cannot assert here that
+			 * ip->i_delayed_blks == 0.
+			 */
 		}
-		/*
-		 * even after flushing the inode, there can still be delalloc
-		 * blocks on the inode beyond EOF due to speculative
-		 * preallocation. These are not removed until the release
-		 * function is called or the inode is inactivated. Hence we
-		 * cannot assert here that ip->i_delayed_blks == 0.
-		 */
-	}
 
-	lock = xfs_ilock_map_shared(ip);
+		lock = xfs_ilock_data_map_shared(ip);
+	} else {
+		lock = xfs_ilock_attr_map_shared(ip);
+	}
 
 	/*
 	 * Don't let nex be bigger than the number of extents
@@ -738,7 +743,7 @@ xfs_getbmap(
  out_free_map:
 	kmem_free(map);
  out_unlock_ilock:
-	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, lock);
  out_unlock_iolock:
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -1169,9 +1174,15 @@ xfs_zero_remaining_bytes(
 	xfs_buf_unlock(bp);
 
 	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
+		uint lock_mode;
+
 		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 		nimap = 1;
+
+		lock_mode = xfs_ilock_data_map_shared(ip);
 		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
+		xfs_iunlock(ip, lock_mode);
+
 		if (error || nimap < 1)
 			break;
 		ASSERT(imap.br_blockcount >= 1);
...
@@ -1593,12 +1593,11 @@ xfs_free_buftarg(
 	kmem_free(btp);
 }
 
-STATIC int
-xfs_setsize_buftarg_flags(
+int
+xfs_setsize_buftarg(
 	xfs_buftarg_t		*btp,
 	unsigned int		blocksize,
-	unsigned int		sectorsize,
-	int			verbose)
+	unsigned int		sectorsize)
 {
 	btp->bt_bsize = blocksize;
 	btp->bt_sshift = ffs(sectorsize) - 1;
@@ -1619,26 +1618,17 @@ xfs_setsize_buftarg_flags(
 }
 
 /*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
 STATIC int
 xfs_setsize_buftarg_early(
 	xfs_buftarg_t		*btp,
 	struct block_device	*bdev)
 {
-	return xfs_setsize_buftarg_flags(btp,
-			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
-}
-
-int
-xfs_setsize_buftarg(
-	xfs_buftarg_t		*btp,
-	unsigned int		blocksize,
-	unsigned int		sectorsize)
-{
-	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
+	return xfs_setsize_buftarg(btp, PAGE_SIZE,
+				   bdev_logical_block_size(bdev));
 }
 
 xfs_buftarg_t *
...
@@ -182,21 +182,47 @@ xfs_buf_item_size(
 	trace_xfs_buf_item_size(bip);
 }
 
-static struct xfs_log_iovec *
+static inline void
+xfs_buf_item_copy_iovec(
+	struct xfs_log_vec	*lv,
+	struct xfs_log_iovec	**vecp,
+	struct xfs_buf		*bp,
+	uint			offset,
+	int			first_bit,
+	uint			nbits)
+{
+	offset += first_bit * XFS_BLF_CHUNK;
+	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
+			xfs_buf_offset(bp, offset),
+			nbits * XFS_BLF_CHUNK);
+}
+
+static inline bool
+xfs_buf_item_straddle(
+	struct xfs_buf		*bp,
+	uint			offset,
+	int			next_bit,
+	int			last_bit)
+{
+	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
+		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
+		 XFS_BLF_CHUNK);
+}
+
+static void
 xfs_buf_item_format_segment(
 	struct xfs_buf_log_item	*bip,
-	struct xfs_log_iovec	*vecp,
+	struct xfs_log_vec	*lv,
+	struct xfs_log_iovec	**vecp,
 	uint			offset,
 	struct xfs_buf_log_format *blfp)
 {
 	struct xfs_buf		*bp = bip->bli_buf;
 	uint			base_size;
-	uint			nvecs;
 	int			first_bit;
 	int			last_bit;
 	int			next_bit;
 	uint			nbits;
-	uint			buffer_offset;
 
 	/* copy the flags across from the base format item */
 	blfp->blf_flags = bip->__bli_format.blf_flags;
@@ -208,21 +234,17 @@ xfs_buf_item_format_segment(
 	 */
 	base_size = xfs_buf_log_format_size(blfp);
 
-	nvecs = 0;
 	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
 	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
 		/*
 		 * If the map is not be dirty in the transaction, mark
 		 * the size as zero and do not advance the vector pointer.
 		 */
-		goto out;
+		return;
 	}
 
-	vecp->i_addr = blfp;
-	vecp->i_len = base_size;
-	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
-	vecp++;
-	nvecs = 1;
+	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
+	blfp->blf_size = 1;
 
 	if (bip->bli_flags & XFS_BLI_STALE) {
 		/*
@@ -232,14 +254,13 @@ xfs_buf_item_format_segment(
 		 */
 		trace_xfs_buf_item_format_stale(bip);
 		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
-		goto out;
+		return;
 	}
 
 	/*
 	 * Fill in an iovec for each set of contiguous chunks.
 	 */
-
 	last_bit = first_bit;
 	nbits = 1;
 	for (;;) {
@@ -252,42 +273,22 @@ xfs_buf_item_format_segment(
 		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
 					(uint)last_bit + 1);
 		/*
-		 * If we run out of bits fill in the last iovec and get
-		 * out of the loop.
-		 * Else if we start a new set of bits then fill in the
-		 * iovec for the series we were looking at and start
-		 * counting the bits in the new one.
-		 * Else we're still in the same set of bits so just
-		 * keep counting and scanning.
+		 * If we run out of bits fill in the last iovec and get out of
+		 * the loop.  Else if we start a new set of bits then fill in
+		 * the iovec for the series we were looking at and start
+		 * counting the bits in the new one.  Else we're still in the
+		 * same set of bits so just keep counting and scanning.
 		 */
 		if (next_bit == -1) {
-			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
-			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
-			vecp->i_len = nbits * XFS_BLF_CHUNK;
-			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-			nvecs++;
+			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
+						first_bit, nbits);
+			blfp->blf_size++;
 			break;
-		} else if (next_bit != last_bit + 1) {
-			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
-			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
-			vecp->i_len = nbits * XFS_BLF_CHUNK;
-			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-			nvecs++;
-			vecp++;
-			first_bit = next_bit;
-			last_bit = next_bit;
-			nbits = 1;
-		} else if (xfs_buf_offset(bp, offset +
-					  (next_bit << XFS_BLF_SHIFT)) !=
-			   (xfs_buf_offset(bp, offset +
-					   (last_bit << XFS_BLF_SHIFT)) +
-			    XFS_BLF_CHUNK)) {
-			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
-			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
-			vecp->i_len = nbits * XFS_BLF_CHUNK;
-			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-			nvecs++;
-			vecp++;
+		} else if (next_bit != last_bit + 1 ||
+			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
+			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
+						first_bit, nbits);
+			blfp->blf_size++;
 			first_bit = next_bit;
 			last_bit = next_bit;
 			nbits = 1;
@@ -296,9 +297,6 @@ xfs_buf_item_format_segment(
 			nbits++;
 		}
 	}
-out:
-	blfp->blf_size = nvecs;
-	return vecp;
 }
 
 /*
@@ -310,10 +308,11 @@ xfs_buf_item_format_segment(
 STATIC void
 xfs_buf_item_format(
 	struct xfs_log_item	*lip,
-	struct xfs_log_iovec	*vecp)
+	struct xfs_log_vec	*lv)
 {
 	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
 	struct xfs_buf		*bp = bip->bli_buf;
+	struct xfs_log_iovec	*vecp = NULL;
 	uint			offset = 0;
 	int			i;
@@ -354,8 +353,8 @@ xfs_buf_item_format(
 	}
 
 	for (i = 0; i < bip->bli_format_count; i++) {
-		vecp = xfs_buf_item_format_segment(bip, vecp, offset,
-						&bip->bli_formats[i]);
+		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
+					    &bip->bli_formats[i]);
 		offset += bp->b_maps[i].bm_len;
 	}
...
@@ -674,6 +674,7 @@ xfs_readdir(
 {
 	int		rval;		/* return value */
 	int		v;		/* type-checking value */
+	uint		lock_mode;
 
 	trace_xfs_readdir(dp);
@@ -683,6 +684,7 @@ xfs_readdir(
 	ASSERT(S_ISDIR(dp->i_d.di_mode));
 	XFS_STATS_INC(xs_dir_getdents);
 
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
 		rval = xfs_dir2_sf_getdents(dp, ctx);
 	else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
@@ -691,5 +693,7 @@ xfs_readdir(
 		rval = xfs_dir2_block_getdents(dp, ctx);
 	else
 		rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize);
+	xfs_iunlock(dp, lock_mode);
+
 	return rval;
 }
@@ -170,6 +170,7 @@ xfs_dir2_block_to_sf(
 	char			*ptr;		/* current data pointer */
 	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */
 	xfs_dir2_sf_hdr_t	*sfp;		/* shortform directory header */
+	xfs_dir2_sf_hdr_t	*dst;		/* temporary data buffer */
 
 	trace_xfs_dir2_block_to_sf(args);
@@ -177,35 +178,20 @@ xfs_dir2_block_to_sf(
 	mp = dp->i_mount;
 
 	/*
-	 * Make a copy of the block data, so we can shrink the inode
-	 * and add local data.
+	 * allocate a temporary destination buffer the size of the inode
+	 * to format the data into.  Once we have formatted the data, we
+	 * can free the block and copy the formatted data into the inode literal
+	 * area.
 	 */
-	hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
-	memcpy(hdr, bp->b_addr, mp->m_dirblksize);
-	logflags = XFS_ILOG_CORE;
-	if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
-		ASSERT(error != ENOSPC);
-		goto out;
-	}
-
-	/*
-	 * The buffer is now unconditionally gone, whether
-	 * xfs_dir2_shrink_inode worked or not.
-	 *
-	 * Convert the inode to local format.
-	 */
-	dp->i_df.if_flags &= ~XFS_IFEXTENTS;
-	dp->i_df.if_flags |= XFS_IFINLINE;
-	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
-	ASSERT(dp->i_df.if_bytes == 0);
-	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
-	logflags |= XFS_ILOG_DDATA;
+	dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP);
+	hdr = bp->b_addr;
 
 	/*
 	 * Copy the header into the newly allocate local space.
 	 */
-	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+	sfp = (xfs_dir2_sf_hdr_t *)dst;
 	memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
-	dp->i_d.di_size = size;
+
 	/*
 	 * Set up to loop over the block's entries.
 	 */
@@ -258,10 +244,34 @@ xfs_dir2_block_to_sf(
 		ptr += dp->d_ops->data_entsize(dep->namelen);
 	}
 	ASSERT((char *)sfep - (char *)sfp == size);
+
+	/* now we are done with the block, we can shrink the inode */
+	logflags = XFS_ILOG_CORE;
+	error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp);
+	if (error) {
+		ASSERT(error != ENOSPC);
+		goto out;
+	}
+
+	/*
+	 * The buffer is now unconditionally gone, whether
+	 * xfs_dir2_shrink_inode worked or not.
+	 *
+	 * Convert the inode to local format and copy the data in.
+	 */
+	dp->i_df.if_flags &= ~XFS_IFEXTENTS;
+	dp->i_df.if_flags |= XFS_IFINLINE;
+	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+	ASSERT(dp->i_df.if_bytes == 0);
+	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
+	logflags |= XFS_ILOG_DDATA;
+	memcpy(dp->i_df.if_u1.if_data, dst, size);
+	dp->i_d.di_size = size;
+
 	xfs_dir2_sf_check(args);
out:
 	xfs_trans_log_inode(args->trans, dp, logflags);
-	kmem_free(hdr);
+	kmem_free(dst);
 	return error;
 }
...
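
The reordering above is the fix for the temporary buffer allocation failure with large directory block sizes: the old code allocated m_dirblksize bytes, which can be a high-order allocation that fails under memory pressure, while the data being formatted must ultimately fit in the inode literal area, so sb_inodesize bytes always suffice. A sketch of the before/after allocation sizes (the byte values are illustrative, not from this diff):

	/* old: up to a 64 KiB allocation with large directory blocks */
	hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);		/* e.g. 65536 bytes */

	/* new: small allocation bounded by the on-disk inode size */
	dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP);	/* e.g. 512 bytes */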
@@ -469,16 +469,17 @@ xfs_qm_dqtobp(
 	struct xfs_mount	*mp = dqp->q_mount;
 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
+	uint			lock_mode;
 
 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 
-	xfs_ilock(quotip, XFS_ILOCK_SHARED);
+	lock_mode = xfs_ilock_data_map_shared(quotip);
 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 		/*
 		 * Return if this type of quotas is turned off while we
 		 * didn't have the quota inode lock.
 		 */
-		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+		xfs_iunlock(quotip, lock_mode);
 		return ESRCH;
 	}
@@ -488,7 +489,7 @@ xfs_qm_dqtobp(
 		error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 				       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 
-		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+		xfs_iunlock(quotip, lock_mode);
 		if (error)
 			return error;
...
@@ -57,20 +57,24 @@ xfs_qm_dquot_logitem_size(
 STATIC void
 xfs_qm_dquot_logitem_format(
 	struct xfs_log_item	*lip,
-	struct xfs_log_iovec	*logvec)
+	struct xfs_log_vec	*lv)
 {
 	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
+	struct xfs_dq_logformat	*qlf;
 
-	logvec->i_addr = &qlip->qli_format;
-	logvec->i_len  = sizeof(xfs_dq_logformat_t);
-	logvec->i_type = XLOG_REG_TYPE_QFORMAT;
-	logvec++;
-	logvec->i_addr = &qlip->qli_dquot->q_core;
-	logvec->i_len  = sizeof(xfs_disk_dquot_t);
-	logvec->i_type = XLOG_REG_TYPE_DQUOT;
-
-	qlip->qli_format.qlf_size = 2;
+	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
+	qlf->qlf_type = XFS_LI_DQUOT;
+	qlf->qlf_size = 2;
+	qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id);
+	qlf->qlf_blkno = qlip->qli_dquot->q_blkno;
+	qlf->qlf_len = 1;
+	qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset;
+	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));
+
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT,
+			&qlip->qli_dquot->q_core,
+			sizeof(struct xfs_disk_dquot));
 }
 
 /*
@@ -257,18 +261,6 @@ xfs_qm_dquot_logitem_init(
 	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
 					&xfs_dquot_item_ops);
 	lp->qli_dquot = dqp;
-	lp->qli_format.qlf_type = XFS_LI_DQUOT;
-	lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
-	lp->qli_format.qlf_blkno = dqp->q_blkno;
-	lp->qli_format.qlf_len = 1;
-	/*
-	 * This is just the offset of this dquot within its buffer
-	 * (which is currently 1 FSB and probably won't change).
-	 * Hence 32 bits for this offset should be just fine.
-	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
-	 * here, and recompute it at recovery time.
-	 */
-	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
 }
 
/*------------------ QUOTAOFF LOG ITEMS -------------------*/
@@ -294,26 +286,20 @@ xfs_qm_qoff_logitem_size(
 	*nbytes += sizeof(struct xfs_qoff_logitem);
 }
 
-/*
- * This is called to fill in the vector of log iovecs for the
- * given quotaoff log item. We use only 1 iovec, and we point that
- * at the quotaoff_log_format structure embedded in the quotaoff item.
- * It is at this point that we assert that all of the extent
- * slots in the quotaoff item have been filled.
- */
 STATIC void
 xfs_qm_qoff_logitem_format(
 	struct xfs_log_item	*lip,
-	struct xfs_log_iovec	*log_vector)
+	struct xfs_log_vec	*lv)
 {
 	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
+	struct xfs_qoff_logformat *qlf;
 
-	ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
-
-	log_vector->i_addr = &qflip->qql_format;
-	log_vector->i_len = sizeof(xfs_qoff_logitem_t);
-	log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
-	qflip->qql_format.qf_size = 1;
+	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF);
+	qlf->qf_type = XFS_LI_QUOTAOFF;
+	qlf->qf_size = 1;
+	qlf->qf_flags = qflip->qql_flags;
+	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem));
 }
 
 /*
@@ -453,8 +439,7 @@ xfs_qm_qoff_logitem_init(
 	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
 			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
 	qf->qql_item.li_mountp = mp;
-	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
-	qf->qql_format.qf_flags = flags;
 	qf->qql_start_lip = start;
+	qf->qql_flags = flags;
 	return qf;
 }
@@ -27,13 +27,12 @@ typedef struct xfs_dq_logitem {
 	xfs_log_item_t		 qli_item;	/* common portion */
 	struct xfs_dquot	*qli_dquot;	/* dquot ptr */
 	xfs_lsn_t		 qli_flush_lsn;	/* lsn at last flush */
-	xfs_dq_logformat_t	 qli_format;	/* logged structure */
 } xfs_dq_logitem_t;
 
 typedef struct xfs_qoff_logitem {
 	xfs_log_item_t		 qql_item;	/* common portion */
 	struct xfs_qoff_logitem *qql_start_lip;	/* qoff-start logitem, if any */
-	xfs_qoff_logformat_t	 qql_format;	/* logged structure */
+	unsigned int		 qql_flags;
 } xfs_qoff_logitem_t;
...
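
This is the item-side face of the in-memory/on-disk log format decoupling: format routines now receive a struct xfs_log_vec and emit regions through helpers instead of filling xfs_log_iovec slots by hand, and the on-disk format structures are built at format time rather than being embedded in the in-core items. A sketch of the calling convention as used above (the helper implementations themselves are not part of this excerpt):

	struct xfs_log_iovec	*vecp = NULL;

	/* format directly into the log vector's buffer ... */
	qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT);
	/* ... fill in *qlf from the in-core item ... */
	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat));

	/* ... or copy an existing in-core region in one call: */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT,
			&qlip->qli_dquot->q_core,
			sizeof(struct xfs_disk_dquot));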
@@ -26,6 +26,7 @@
 #include "xfs_trans_priv.h"
 #include "xfs_buf_item.h"
 #include "xfs_extfree_item.h"
+#include "xfs_log.h"
 
 kmem_zone_t	*xfs_efi_zone;
@@ -101,9 +102,10 @@ xfs_efi_item_size(
 STATIC void
 xfs_efi_item_format(
 	struct xfs_log_item	*lip,
-	struct xfs_log_iovec	*log_vector)
+	struct xfs_log_vec	*lv)
 {
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
 
 	ASSERT(atomic_read(&efip->efi_next_extent) ==
 				efip->efi_format.efi_nextents);
@@ -111,10 +113,9 @@ xfs_efi_item_format(
 	efip->efi_format.efi_type = XFS_LI_EFI;
 	efip->efi_format.efi_size = 1;
 
-	log_vector->i_addr = &efip->efi_format;
-	log_vector->i_len = xfs_efi_item_sizeof(efip);
-	log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
-	ASSERT(log_vector->i_len >= sizeof(xfs_efi_log_format_t));
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFI_FORMAT,
+			&efip->efi_format,
+			xfs_efi_item_sizeof(efip));
 }
@@ -368,19 +369,19 @@ xfs_efd_item_size(
 STATIC void
 xfs_efd_item_format(
 	struct xfs_log_item	*lip,
-	struct xfs_log_iovec	*log_vector)
+	struct xfs_log_vec	*lv)
 {
 	struct xfs_efd_log_item	*efdp = EFD_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
 
 	ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents);
 
 	efdp->efd_format.efd_type = XFS_LI_EFD;
 	efdp->efd_format.efd_size = 1;
 
-	log_vector->i_addr = &efdp->efd_format;
-	log_vector->i_len = xfs_efd_item_sizeof(efdp);
-	log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
-	ASSERT(log_vector->i_len >= sizeof(xfs_efd_log_format_t));
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFD_FORMAT,
+			&efdp->efd_format,
+			xfs_efd_item_sizeof(efdp));
 }
 
 /*
...
@@ -912,7 +912,7 @@ xfs_dir_open(
 	 * If there are any blocks, read-ahead block 0 as we're almost
 	 * certain to have the next operation be a read there.
 	 */
-	mode = xfs_ilock_map_shared(ip);
+	mode = xfs_ilock_data_map_shared(ip);
 	if (ip->i_d.di_nextents > 0)
 		xfs_dir3_data_readahead(NULL, ip, 0, -1);
 	xfs_iunlock(ip, mode);
@@ -1215,7 +1215,7 @@ xfs_seek_data(
 	uint			lock;
 	int			error;
 
-	lock = xfs_ilock_map_shared(ip);
+	lock = xfs_ilock_data_map_shared(ip);
 
 	isize = i_size_read(inode);
 	if (start >= isize) {
@@ -1294,7 +1294,7 @@ xfs_seek_data(
 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 
out_unlock:
-	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, lock);
 
 	if (error)
 		return -error;
@@ -1319,7 +1319,7 @@ xfs_seek_hole(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -XFS_ERROR(EIO);
 
-	lock = xfs_ilock_map_shared(ip);
+	lock = xfs_ilock_data_map_shared(ip);
 
 	isize = i_size_read(inode);
 	if (start >= isize) {
@@ -1402,7 +1402,7 @@ xfs_seek_hole(
 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 
out_unlock:
-	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, lock);
 
 	if (error)
 		return -error;
...
@@ -52,7 +52,7 @@ xfs_ialloc_cluster_alignment(
 {
 	if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
 	    args->mp->m_sb.sb_inoalignmt >=
-	     XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp)))
+	     XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size))
 		return args->mp->m_sb.sb_inoalignmt;
 	return 1;
 }
@@ -170,27 +170,20 @@ xfs_ialloc_inode_init(
 {
 	struct xfs_buf		*fbuf;
 	struct xfs_dinode	*free;
-	int			blks_per_cluster, nbufs, ninodes;
+	int			nbufs, blks_per_cluster, inodes_per_cluster;
 	int			version;
 	int			i, j;
 	xfs_daddr_t		d;
 	xfs_ino_t		ino = 0;
 
 	/*
-	 * Loop over the new block(s), filling in the inodes.
-	 * For small block sizes, manipulate the inodes in buffers
-	 * which are multiples of the blocks size.
+	 * Loop over the new block(s), filling in the inodes.  For small block
+	 * sizes, manipulate the inodes in buffers which are multiples of the
+	 * blocks size.
 	 */
-	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-		blks_per_cluster = 1;
-		nbufs = length;
-		ninodes = mp->m_sb.sb_inopblock;
-	} else {
-		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-				   mp->m_sb.sb_blocksize;
-		nbufs = length / blks_per_cluster;
-		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-	}
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+	nbufs = length / blks_per_cluster;
 
 	/*
 	 * Figure out what version number to use in the inodes we create.  If
@@ -225,7 +218,7 @@ xfs_ialloc_inode_init(
 		 * they track in the AIL as if they were physically logged.
 		 */
 		if (tp)
-			xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp),
+			xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos,
 					mp->m_sb.sb_inodesize, length, gen);
 	} else if (xfs_sb_version_hasnlink(&mp->m_sb))
 		version = 2;
@@ -246,7 +239,7 @@ xfs_ialloc_inode_init(
 		/* Initialize the inode buffers and log them appropriately. */
 		fbuf->b_ops = &xfs_inode_buf_ops;
 		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
-		for (i = 0; i < ninodes; i++) {
+		for (i = 0; i < inodes_per_cluster; i++) {
 			int	ioffset = i << mp->m_sb.sb_inodelog;
 			uint	isize = xfs_dinode_size(version);
@@ -329,11 +322,11 @@ xfs_ialloc_ag_alloc(
 	 * Locking will ensure that we don't have two callers in here
 	 * at one time.
 	 */
-	newlen = XFS_IALLOC_INODES(args.mp);
+	newlen = args.mp->m_ialloc_inos;
 	if (args.mp->m_maxicount &&
 	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
 		return XFS_ERROR(ENOSPC);
-	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
+	args.minlen = args.maxlen = args.mp->m_ialloc_blks;
 	/*
 	 * First try to allocate inodes contiguous with the last-allocated
 	 * chunk of inodes.  If the filesystem is striped, this will fill
@@ -343,7 +336,7 @@ xfs_ialloc_ag_alloc(
 	newino = be32_to_cpu(agi->agi_newino);
 	agno = be32_to_cpu(agi->agi_seqno);
 	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
-		     XFS_IALLOC_BLOCKS(args.mp);
+		     args.mp->m_ialloc_blks;
 	if (likely(newino != NULLAGINO &&
 		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
@@ -585,7 +578,7 @@ xfs_ialloc_ag_select(
 		 * Is there enough free space for the file plus a block of
 		 * inodes? (if we need to allocate some)?
 		 */
-		ineed = XFS_IALLOC_BLOCKS(mp);
+		ineed = mp->m_ialloc_blks;
 		longest = pag->pagf_longest;
 		if (!longest)
 			longest = pag->pagf_flcount > 0;
@@ -999,7 +992,7 @@ xfs_dialloc(
 	 * inode.
 	 */
 	if (mp->m_maxicount &&
-	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
+	    mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) {
 		noroom = 1;
 		okalloc = 0;
 	}
@@ -1202,7 +1195,7 @@ xfs_difree(
 	 * When an inode cluster is free, it becomes eligible for removal
 	 */
 	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
-	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
+	    (rec.ir_freecount == mp->m_ialloc_inos)) {
 
 		*delete = 1;
 		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
@@ -1212,7 +1205,7 @@ xfs_difree(
 		 * AGI and Superblock inode counts, and mark the disk space
 		 * to be freed when the transaction is committed.
 		 */
-		ilen = XFS_IALLOC_INODES(mp);
+		ilen = mp->m_ialloc_inos;
 		be32_add_cpu(&agi->agi_count, -ilen);
 		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
@@ -1228,9 +1221,9 @@ xfs_difree(
 			goto error0;
 		}
 
-		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
-				agno, XFS_INO_TO_AGBNO(mp,rec.ir_startino)),
-				XFS_IALLOC_BLOCKS(mp), flist, mp);
+		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno,
+				  XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)),
+				  mp->m_ialloc_blks, flist, mp);
 	} else {
 		*delete = 0;
@@ -1311,7 +1304,7 @@ xfs_imap_lookup(
 	/* check that the returned record contains the required inode */
 	if (rec.ir_startino > agino ||
-	    rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+	    rec.ir_startino + mp->m_ialloc_inos <= agino)
 		return EINVAL;
 
 	/* for untrusted inodes check it is allocated first */
@@ -1384,7 +1377,7 @@ xfs_imap(
 		return XFS_ERROR(EINVAL);
 	}
 
-	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
 
 	/*
 	 * For bulkstat and handle lookups, we have an untrusted inode number
@@ -1405,7 +1398,7 @@ xfs_imap(
 	 * If the inode cluster size is the same as the blocksize or
 	 * smaller we get to the buffer by simple arithmetics.
 	 */
-	if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) {
+	if (blks_per_cluster == 1) {
 		offset = XFS_INO_TO_OFFSET(mp, ino);
 		ASSERT(offset < mp->m_sb.sb_inopblock);
...
@@ -25,17 +25,18 @@ struct xfs_mount;
 struct xfs_trans;
 struct xfs_btree_cur;
 
-/*
- * Allocation parameters for inode allocation.
- */
-#define XFS_IALLOC_INODES(mp)	(mp)->m_ialloc_inos
-#define XFS_IALLOC_BLOCKS(mp)	(mp)->m_ialloc_blks
-
-/*
- * Move inodes in clusters of this size.
- */
+/* Move inodes in clusters of this size */
 #define XFS_INODE_BIG_CLUSTER_SIZE	8192
-#define XFS_INODE_CLUSTER_SIZE(mp)	(mp)->m_inode_cluster_size
+
+/* Calculate and return the number of filesystem blocks per inode cluster */
+static inline int
+xfs_icluster_size_fsb(
+	struct xfs_mount	*mp)
+{
+	if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
+		return 1;
+	return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
+}
 
 /*
 * Make an inode pointer out of the buffer/offset.
...
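
A worked example of the new helper, using the conversions its callers in this merge actually derive from it: with 4 KiB filesystem blocks (sb_blocklog = 12) and an 8 KiB inode cluster, xfs_icluster_size_fsb() returns 8192 >> 12 = 2, and the per-cluster quantities in xfs_ialloc_inode_init() and xfs_ifree_cluster() follow directly (the inode count assumes 16 inodes per block, i.e. sb_inopblog = 4):

	blks_per_cluster   = xfs_icluster_size_fsb(mp);			/* 2 */
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;	/* 2 << 4 = 32 */
	nbufs              = mp->m_ialloc_blks / blks_per_cluster;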
@@ -28,6 +28,7 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_icreate_item.h"
+#include "xfs_log.h"
 
 kmem_zone_t	*xfs_icreate_zone;		/* inode create item zone */
@@ -58,13 +59,14 @@ xfs_icreate_item_size(
 STATIC void
 xfs_icreate_item_format(
 	struct xfs_log_item	*lip,
-	struct xfs_log_iovec	*log_vector)
+	struct xfs_log_vec	*lv)
 {
 	struct xfs_icreate_item	*icp = ICR_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
 
-	log_vector->i_addr = (xfs_caddr_t)&icp->ic_format;
-	log_vector->i_len  = sizeof(struct xfs_icreate_log);
-	log_vector->i_type = XLOG_REG_TYPE_ICREATE;
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICREATE,
+			&icp->ic_format,
+			sizeof(struct xfs_icreate_log));
 }
...
@@ -77,48 +77,44 @@ xfs_get_extsz_hint(
 }
 
 /*
- * This is a wrapper routine around the xfs_ilock() routine used to centralize
- * some grungy code. It is used in places that wish to lock the inode solely
- * for reading the extents. The reason these places can't just call
- * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the
- * extents from disk for a file in b-tree format. If the inode is in b-tree
- * format, then we need to lock the inode exclusively until the extents are read
- * in. Locking it exclusively all the time would limit our parallelism
- * unnecessarily, though. What we do instead is check to see if the extents
- * have been read in yet, and only lock the inode exclusively if they have not.
+ * These two are wrapper routines around the xfs_ilock() routine used to
+ * centralize some grungy code.  They are used in places that wish to lock the
+ * inode solely for reading the extents.  The reason these places can't just
+ * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
+ * bringing in of the extents from disk for a file in b-tree format.  If the
+ * inode is in b-tree format, then we need to lock the inode exclusively until
+ * the extents are read in.  Locking it exclusively all the time would limit
+ * our parallelism unnecessarily, though.  What we do instead is check to see
+ * if the extents have been read in yet, and only lock the inode exclusively
+ * if they have not.
 *
- * The function returns a value which should be given to the corresponding
- * xfs_iunlock_map_shared(). This value is the mode in which the lock was
- * actually taken.
+ * The functions return a value which should be given to the corresponding
+ * xfs_iunlock() call.
 */
 uint
-xfs_ilock_map_shared(
-	xfs_inode_t	*ip)
+xfs_ilock_data_map_shared(
+	struct xfs_inode	*ip)
 {
-	uint	lock_mode;
+	uint			lock_mode = XFS_ILOCK_SHARED;
 
-	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
-	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
-		lock_mode = XFS_ILOCK_EXCL;
-	} else {
-		lock_mode = XFS_ILOCK_SHARED;
-	}
+	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
+		lock_mode = XFS_ILOCK_EXCL;
 	xfs_ilock(ip, lock_mode);
 	return lock_mode;
 }
 
-/*
- * This is simply the unlock routine to go with xfs_ilock_map_shared().
- * All it does is call xfs_iunlock() with the given lock_mode.
- */
-void
-xfs_iunlock_map_shared(
-	xfs_inode_t	*ip,
-	unsigned int	lock_mode)
+uint
+xfs_ilock_attr_map_shared(
+	struct xfs_inode	*ip)
 {
-	xfs_iunlock(ip, lock_mode);
+	uint			lock_mode = XFS_ILOCK_SHARED;
+
+	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
+	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
+		lock_mode = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lock_mode);
+	return lock_mode;
 }
 
 /*
@@ -588,9 +584,9 @@ xfs_lookup(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return XFS_ERROR(EIO);
 
-	lock_mode = xfs_ilock_map_shared(dp);
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
-	xfs_iunlock_map_shared(dp, lock_mode);
+	xfs_iunlock(dp, lock_mode);
 
 	if (error)
 		goto out;
@@ -2141,8 +2137,8 @@ xfs_ifree_cluster(
 {
 	xfs_mount_t		*mp = free_ip->i_mount;
 	int			blks_per_cluster;
+	int			inodes_per_cluster;
 	int			nbufs;
-	int			ninodes;
 	int			i, j;
 	xfs_daddr_t		blkno;
 	xfs_buf_t		*bp;
@@ -2152,18 +2148,11 @@ xfs_ifree_cluster(
 	struct xfs_perag	*pag;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
-	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-		blks_per_cluster = 1;
-		ninodes = mp->m_sb.sb_inopblock;
-		nbufs = XFS_IALLOC_BLOCKS(mp);
-	} else {
-		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-					mp->m_sb.sb_blocksize;
-		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
-	}
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+	nbufs = mp->m_ialloc_blks / blks_per_cluster;
 
-	for (j = 0; j < nbufs; j++, inum += ninodes) {
+	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
 					 XFS_INO_TO_AGBNO(mp, inum));
@@ -2225,7 +2214,7 @@ xfs_ifree_cluster(
 		 * transaction stale above, which means there is no point in
 		 * even trying to lock them.
 		 */
-		for (i = 0; i < ninodes; i++) {
+		for (i = 0; i < inodes_per_cluster; i++) {
retry:
 			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
@@ -2906,13 +2895,13 @@ xfs_iflush_cluster(
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 
-	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
+	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
 	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
 	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
 	if (!ilist)
 		goto out_put;
 
-	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
+	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
 	rcu_read_lock();
 	/* really need a gang lookup range call here */
...
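
The data/attr split above matters because each helper inspects its own fork: the data variant checks di_format and the data fork's flags, the attribute variant checks di_aformat and i_afp, and either takes XFS_ILOCK_EXCL only when that fork is in btree format with its extents not yet read in. The caller pattern for the attribute side, as used in the xfs_attr_get() hunk earlier:

	uint	lock_mode;

	lock_mode = xfs_ilock_attr_map_shared(ip);	/* SHARED, or EXCL if the
							   extent list must be read */
	error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags);
	xfs_iunlock(ip, lock_mode);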
@@ -337,8 +337,8 @@ int		xfs_ilock_nowait(xfs_inode_t *, uint);
 void		xfs_iunlock(xfs_inode_t *, uint);
 void		xfs_ilock_demote(xfs_inode_t *, uint);
 int		xfs_isilocked(xfs_inode_t *, uint);
-uint		xfs_ilock_map_shared(xfs_inode_t *);
-void		xfs_iunlock_map_shared(xfs_inode_t *, uint);
+uint		xfs_ilock_data_map_shared(struct xfs_inode *);
+uint		xfs_ilock_attr_map_shared(struct xfs_inode *);
 int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
 			   xfs_nlink_t, xfs_dev_t, prid_t, int,
 			   struct xfs_buf **, xfs_inode_t **);
...
@@ -431,6 +431,8 @@ xfs_iread_extents(
 	xfs_ifork_t	*ifp;
 	xfs_extnum_t	nextents;
 
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
 	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
 		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
 				 ip->i_mount);
@@ -721,15 +723,16 @@ xfs_idestroy_fork(
 }
 
 /*
- * xfs_iextents_copy()
+ * Convert in-core extents to on-disk form
 *
- * This is called to copy the REAL extents (as opposed to the delayed
- * allocation extents) from the inode into the given buffer. It
- * returns the number of bytes copied into the buffer.
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extent as we place them into the on-disk inode.
 *
- * If there are no delayed allocation extents, then we can just
- * memcpy() the extents into the buffer. Otherwise, we need to
- * examine each extent in turn and skip those which are delayed.
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only copy on-disk extents
+ * here, so callers must always use the physical fork size to determine the
+ * size of the buffer passed to this routine.  We will return the size actually
+ * used.
 */
 int
 xfs_iextents_copy(
...
@@ -34,11 +34,6 @@ typedef struct xfs_inode_log_item {
 	unsigned short		ili_logged;	   /* flushed logged data */
 	unsigned int		ili_last_fields;   /* fields when flushed */
 	unsigned int		ili_fields;	   /* fields to be logged */
-	struct xfs_bmbt_rec	*ili_extents_buf;  /* array of logged
-						      data exts */
-	struct xfs_bmbt_rec	*ili_aextents_buf; /* array of logged
-						      attr exts */
-	xfs_inode_log_format_t	ili_format;	   /* logged structure */
 } xfs_inode_log_item_t;
 
 static inline int xfs_inode_clean(xfs_inode_t *ip)
...
@@ -112,15 +112,11 @@ xfs_find_handle(
 		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
 		hsize = sizeof(xfs_fsid_t);
 	} else {
-		int		lock_mode;
-
-		lock_mode = xfs_ilock_map_shared(ip);
 		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
 					sizeof(handle.ha_fid.fid_len);
 		handle.ha_fid.fid_pad = 0;
 		handle.ha_fid.fid_gen = ip->i_d.di_gen;
 		handle.ha_fid.fid_ino = ip->i_ino;
-		xfs_iunlock_map_shared(ip, lock_mode);
 
 		hsize = XFS_HSIZE(handle);
 	}
...
...@@ -459,14 +459,12 @@ xfs_vn_getattr( ...@@ -459,14 +459,12 @@ xfs_vn_getattr(
static void static void
xfs_setattr_mode( xfs_setattr_mode(
struct xfs_trans *tp,
struct xfs_inode *ip, struct xfs_inode *ip,
struct iattr *iattr) struct iattr *iattr)
{ {
struct inode *inode = VFS_I(ip); struct inode *inode = VFS_I(ip);
umode_t mode = iattr->ia_mode; umode_t mode = iattr->ia_mode;
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ip->i_d.di_mode &= S_IFMT; ip->i_d.di_mode &= S_IFMT;
...@@ -476,6 +474,32 @@ xfs_setattr_mode( ...@@ -476,6 +474,32 @@ xfs_setattr_mode(
inode->i_mode |= mode & ~S_IFMT; inode->i_mode |= mode & ~S_IFMT;
} }
static void
xfs_setattr_time(
struct xfs_inode *ip,
struct iattr *iattr)
{
struct inode *inode = VFS_I(ip);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (iattr->ia_valid & ATTR_ATIME) {
inode->i_atime = iattr->ia_atime;
ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
}
if (iattr->ia_valid & ATTR_CTIME) {
inode->i_ctime = iattr->ia_ctime;
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
}
if (iattr->ia_valid & ATTR_MTIME) {
inode->i_mtime = iattr->ia_mtime;
ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
}
}
int int
xfs_setattr_nonsize( xfs_setattr_nonsize(
struct xfs_inode *ip, struct xfs_inode *ip,
...@@ -630,30 +654,10 @@ xfs_setattr_nonsize(
 		}
 	}

-	/*
-	 * Change file access modes.
-	 */
 	if (mask & ATTR_MODE)
-		xfs_setattr_mode(tp, ip, iattr);
-
-	/*
-	 * Change file access or modified times.
-	 */
-	if (mask & ATTR_ATIME) {
-		inode->i_atime = iattr->ia_atime;
-		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
-		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
-	}
-	if (mask & ATTR_CTIME) {
-		inode->i_ctime = iattr->ia_ctime;
-		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
-		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
-	}
-	if (mask & ATTR_MTIME) {
-		inode->i_mtime = iattr->ia_mtime;
-		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-	}
+		xfs_setattr_mode(ip, iattr);
+	if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+		xfs_setattr_time(ip, iattr);

 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
...@@ -868,22 +872,10 @@ xfs_setattr_size(
 		xfs_inode_clear_eofblocks_tag(ip);
 	}

-	/*
-	 * Change file access modes.
-	 */
 	if (mask & ATTR_MODE)
-		xfs_setattr_mode(tp, ip, iattr);
-
-	if (mask & ATTR_CTIME) {
-		inode->i_ctime = iattr->ia_ctime;
-		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
-		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
-	}
-	if (mask & ATTR_MTIME) {
-		inode->i_mtime = iattr->ia_mtime;
-		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-	}
+		xfs_setattr_mode(ip, iattr);
+	if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+		xfs_setattr_time(ip, iattr);

 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
......
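The net effect of the two setattr hunks above: every path now updates timestamps through the shared helper while holding XFS_ILOCK_EXCL, then logs the inode core. A minimal sketch of the resulting call pattern (illustrative only, with the transaction setup and commit elided; not a verbatim excerpt of the patch):

/* Sketch: common setattr flow after the refactor. Both helpers
 * assert that XFS_ILOCK_EXCL is held. */
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);

if (mask & ATTR_MODE)
	xfs_setattr_mode(ip, iattr);		/* transaction arg is gone */
if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
	xfs_setattr_time(ip, iattr);

xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);	/* dirty the inode core */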
...@@ -209,9 +209,8 @@ xfs_bulkstat(
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
 	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
 	xfs_ino_t		lastino; /* last inode number returned */
-	int			nbcluster; /* # of blocks in a cluster */
-	int			nicluster; /* # of inodes in a cluster */
-	int			nimask;	/* mask for inode clusters */
+	int			blks_per_cluster; /* # of blocks per cluster */
+	int			inodes_per_cluster; /* # of inodes per cluster */
 	int			nirbuf;	/* size of irbuf */
 	int			rval;	/* return value error code */
 	int			tmp;	/* result value from btree calls */
...@@ -243,11 +242,8 @@ xfs_bulkstat(
 	*done = 0;
 	fmterror = 0;
 	ubufp = ubuffer;
-	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
-		mp->m_sb.sb_inopblock :
-		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
-	nimask = ~(nicluster - 1);
-	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
 	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
 	if (!irbuf)
 		return ENOMEM;
...@@ -390,12 +386,12 @@ xfs_bulkstat(
 			agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
 			for (chunkidx = 0;
 			     chunkidx < XFS_INODES_PER_CHUNK;
-			     chunkidx += nicluster,
-			     agbno += nbcluster) {
-				if (xfs_inobt_maskn(chunkidx, nicluster)
-				    & ~r.ir_free)
+			     chunkidx += inodes_per_cluster,
+			     agbno += blks_per_cluster) {
+				if (xfs_inobt_maskn(chunkidx,
+						inodes_per_cluster) & ~r.ir_free)
 					xfs_btree_reada_bufs(mp, agno,
-							agbno, nbcluster,
+							agbno, blks_per_cluster,
 							&xfs_inode_buf_ops);
 			}
 			blk_finish_plug(&plug);
......
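The bulkstat conversion is easier to follow with the new common helper spelled out. Below is a sketch of xfs_icluster_size_fsb() as introduced elsewhere in this series (reconstructed, not quoted from this hunk), plus a worked example using hypothetical geometry:

/*
 * Sketch: filesystem blocks per inode cluster, with a floor of one
 * block when the block size exceeds the cluster size.
 */
static inline xfs_extlen_t
xfs_icluster_size_fsb(struct xfs_mount *mp)
{
	if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
		return 1;
	return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
}

/*
 * Hypothetical geometry: 4096-byte blocks, 256-byte inodes,
 * 8192-byte inode clusters.
 *
 *   blks_per_cluster   = 8192 >> 12 = 2
 *   sb_inopblog        = log2(4096 / 256) = 4
 *   inodes_per_cluster = 2 << 4 = 32   (== 8192 / 256)
 */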
...@@ -30,6 +30,52 @@ struct xfs_log_vec {
 #define XFS_LOG_VEC_ORDERED	(-1)

+static inline void *
+xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
+		uint type)
+{
+	struct xfs_log_iovec *vec = *vecp;
+
+	if (vec) {
+		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
+		vec++;
+	} else {
+		vec = &lv->lv_iovecp[0];
+	}
+
+	vec->i_type = type;
+	vec->i_addr = lv->lv_buf + lv->lv_buf_len;
+
+	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));
+
+	*vecp = vec;
+	return vec->i_addr;
+}
+
+static inline void
+xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
+{
+	/*
+	 * We need to make sure the next buffer is naturally aligned for the
+	 * biggest basic data type we put into it. We already accounted for
+	 * this when sizing the buffer.
+	 */
+	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
+	vec->i_len = len;
+}
+
+static inline void *
+xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
+		uint type, void *data, int len)
+{
+	void *buf;
+
+	buf = xlog_prepare_iovec(lv, vecp, type);
+	memcpy(buf, data, len);
+	xlog_finish_iovec(lv, *vecp, len);
+	return buf;
+}
+
 /*
  * Structure used to pass callback function and the function's argument
  * to the log manager.
......
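Together these helpers let an ->iop_format implementation write each region straight into the CIL buffer rather than pointing iovecs at its own staging structures. A hypothetical log item format routine is sketched below; all "foo" names and region types are invented for illustration and are not part of this diff:

/* Sketch of a hypothetical ->iop_format using the new helpers.
 * FOO_ITEM() and the XLOG_REG_TYPE_FOO_* values are invented. */
STATIC void
xfs_foo_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_foo_log_item	*foop = FOO_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;
	void			*buf;

	/* first vector: copy the fixed-size format structure */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_FORMAT,
			&foop->foo_format,
			sizeof(struct xfs_foo_log_format));

	/* second vector: format a variable-length payload in place */
	buf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_PAYLOAD);
	memcpy(buf, foop->foo_data, foop->foo_len);
	xlog_finish_iovec(lv, vecp, foop->foo_len);
}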
...@@ -82,36 +82,6 @@ xlog_cil_init_post_recovery(
 					log->l_curr_block);
 }

-STATIC int
-xlog_cil_lv_item_format(
-	struct xfs_log_item	*lip,
-	struct xfs_log_vec	*lv)
-{
-	int			index;
-	char			*ptr;
-
-	/* format new vectors into array */
-	lip->li_ops->iop_format(lip, lv->lv_iovecp);
-
-	/* copy data into existing array */
-	ptr = lv->lv_buf;
-	for (index = 0; index < lv->lv_niovecs; index++) {
-		struct xfs_log_iovec *vec = &lv->lv_iovecp[index];
-
-		memcpy(ptr, vec->i_addr, vec->i_len);
-		vec->i_addr = ptr;
-		ptr += vec->i_len;
-	}
-
-	/*
-	 * some size calculations for log vectors over-estimate, so the caller
-	 * doesn't know the amount of space actually used by the item. Return
-	 * the byte count to the caller so they can check and store it
-	 * appropriately.
-	 */
-	return ptr - lv->lv_buf;
-}
-
 /*
  * Prepare the log item for insertion into the CIL. Calculate the difference in
  * log space and vectors it will consume, and if it is a new item pin it as
...@@ -232,6 +202,13 @@ xlog_cil_insert_format_items(
 			nbytes = 0;
 		}

+		/*
+		 * We 64-bit align the length of each iovec so that the start
+		 * of the next one is naturally aligned. We'll need to
+		 * account for that slack space here.
+		 */
+		nbytes += niovecs * sizeof(uint64_t);
+
 		/* grab the old item if it exists for reservation accounting */
 		old_lv = lip->li_lv;
...@@ -254,34 +231,27 @@ xlog_cil_insert_format_items(
 			 */
 			*diff_iovecs -= lv->lv_niovecs;
 			*diff_len -= lv->lv_buf_len;
+		} else {
+			/* allocate new data chunk */
+			lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
+			lv->lv_item = lip;
+			lv->lv_size = buf_size;
+			if (ordered) {
+				/* track as an ordered logvec */
+				ASSERT(lip->li_lv == NULL);
+				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
+				goto insert;
+			}
+			lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
+		}

-			/* Ensure the lv is set up according to ->iop_size */
-			lv->lv_niovecs = niovecs;
-			lv->lv_buf = (char *)lv + buf_size - nbytes;
-
-			lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
-			goto insert;
-		}
-
-		/* allocate new data chunk */
-		lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
-		lv->lv_item = lip;
-		lv->lv_size = buf_size;
-		lv->lv_niovecs = niovecs;
-		if (ordered) {
-			/* track as an ordered logvec */
-			ASSERT(lip->li_lv == NULL);
-			lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
-			goto insert;
-		}
-
-		/* The allocated iovec region lies beyond the log vector. */
-		lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
+		/* Ensure the lv is set up according to ->iop_size */
+		lv->lv_niovecs = niovecs;

 		/* The allocated data region lies beyond the iovec region */
+		lv->lv_buf_len = 0;
 		lv->lv_buf = (char *)lv + buf_size - nbytes;
-
-		lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
+		lip->li_ops->iop_format(lip, lv);
 insert:
 		ASSERT(lv->lv_buf_len <= nbytes);
 		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
......
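To make the new slack-space reservation concrete with hypothetical numbers: an item that reports two iovecs of 28 and 11 bytes actually consumes round_up(28, 8) + round_up(11, 8) = 32 + 16 = 48 bytes of lv_buf once xlog_finish_iovec() pads each region, so adding niovecs * sizeof(uint64_t) = 16 bytes to the raw 39-byte estimate always covers the per-vector rounding, since each round_up adds at most 7 bytes.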
...@@ -1654,6 +1654,7 @@ xlog_recover_reorder_trans(
 	int			pass)
 {
 	xlog_recover_item_t	*item, *n;
+	int			error = 0;
 	LIST_HEAD(sort_list);
 	LIST_HEAD(cancel_list);
 	LIST_HEAD(buffer_list);
...@@ -1695,9 +1696,17 @@ xlog_recover_reorder_trans(
 				"%s: unrecognized type of log operation",
 				__func__);
 			ASSERT(0);
-			return XFS_ERROR(EIO);
+			/*
+			 * return the remaining items back to the transaction
+			 * item list so they can be freed in caller.
+			 */
+			if (!list_empty(&sort_list))
+				list_splice_init(&sort_list, &trans->r_itemq);
+			error = XFS_ERROR(EIO);
+			goto out;
 		}
 	}
+out:
 	ASSERT(list_empty(&sort_list));
 	if (!list_empty(&buffer_list))
 		list_splice(&buffer_list, &trans->r_itemq);
...@@ -1707,7 +1716,7 @@ xlog_recover_reorder_trans(
 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
 	if (!list_empty(&cancel_list))
 		list_splice_tail(&cancel_list, &trans->r_itemq);
-	return 0;
+	return error;
 }

 /*
...@@ -2517,19 +2526,19 @@ xlog_recover_buffer_pass2(
 	 *
 	 * Also make sure that only inode buffers with good sizes stay in
 	 * the buffer cache. The kernel moves inodes in buffers of 1 block
-	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
+	 * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode
 	 * buffers in the log can be a different size if the log was generated
 	 * by an older kernel using unclustered inode buffers or a newer kernel
 	 * running with a different inode cluster size. Regardless, if the
-	 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
-	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
+	 * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
+	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
 	 * the buffer out of the buffer cache so that the buffer won't
 	 * overlap with future reads of those inodes.
 	 */
 	if (XFS_DINODE_MAGIC ==
 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
-	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
-			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
+	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
+			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
 		xfs_buf_stale(bp);
 		error = xfs_bwrite(bp);
 	} else {
...@@ -3202,10 +3211,10 @@ xlog_recover_do_icreate_pass2(
 	}

 	/* existing allocation is fixed value */
-	ASSERT(count == XFS_IALLOC_INODES(mp));
-	ASSERT(length == XFS_IALLOC_BLOCKS(mp));
-	if (count != XFS_IALLOC_INODES(mp) ||
-	    length != XFS_IALLOC_BLOCKS(mp)) {
+	ASSERT(count == mp->m_ialloc_inos);
+	ASSERT(length == mp->m_ialloc_blks);
+	if (count != mp->m_ialloc_inos ||
+	    length != mp->m_ialloc_blks) {
 		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
 		return EINVAL;
 	}
...@@ -3611,8 +3620,10 @@ xlog_recover_process_data(
 				error = XFS_ERROR(EIO);
 				break;
 			}
-			if (error)
+			if (error) {
+				xlog_recover_free_trans(trans);
 				return error;
+			}
 		}
 		dp += be32_to_cpu(ohead->oh_len);
 		num_logops--;
......
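The leak fix above relies on xlog_recover_free_trans() releasing everything still queued on the transaction. A sketch of what that teardown has to do for the error path to be leak-free, reconstructed from the shape of the recovery data structures rather than quoted from this diff:

/* Sketch (assumption): tear down a recovery transaction and all
 * items spliced back onto its r_itemq. */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	xlog_recover_item_t	*item, *n;
	int			i;

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);	/* regions */
		kmem_free(item->ri_buf);			/* region table */
		kmem_free(item);				/* the item */
	}
	kmem_free(trans);					/* the transaction */
}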
...@@ -1222,16 +1222,18 @@ xfs_qm_dqiterate(
 	lblkno = 0;
 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 	do {
+		uint		lock_mode;
+
 		nmaps = XFS_DQITER_MAP_SIZE;
 		/*
 		 * We aren't changing the inode itself. Just changing
 		 * some of its data. No new blocks are added here, and
 		 * the inode is never added to the transaction.
 		 */
-		xfs_ilock(qip, XFS_ILOCK_SHARED);
+		lock_mode = xfs_ilock_data_map_shared(qip);
 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
 				       map, &nmaps, 0);
-		xfs_iunlock(qip, XFS_ILOCK_SHARED);
+		xfs_iunlock(qip, lock_mode);
 		if (error)
 			break;
......
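xfs_ilock_data_map_shared() is one of the extent-map locking helpers added by this merge. Sketched below (reconstructed, not quoted from this hunk): it takes the ILOCK exclusively only when the data fork is in btree format and its extent list has not yet been read in, since pulling the extents into memory modifies the in-core fork; otherwise a shared lock suffices, and the caller hands the returned mode back to xfs_iunlock().

/* Sketch: lock the data fork for extent map access, escalating to
 * exclusive only if reading in the extent list would modify it. */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}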
...@@ -20,12 +20,28 @@
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
-#include "xfs_quota_priv.h"

 struct xfs_inode;

 extern struct kmem_zone	*xfs_qm_dqtrxzone;

+/*
+ * Number of bmaps that we ask from bmapi when doing a quotacheck.
+ * We make this restriction to keep the memory usage to a minimum.
+ */
+#define XFS_DQITER_MAP_SIZE	10
+
+#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
+	!dqp->q_core.d_blk_hardlimit && \
+	!dqp->q_core.d_blk_softlimit && \
+	!dqp->q_core.d_rtb_hardlimit && \
+	!dqp->q_core.d_rtb_softlimit && \
+	!dqp->q_core.d_ino_hardlimit && \
+	!dqp->q_core.d_ino_softlimit && \
+	!dqp->q_core.d_bcount && \
+	!dqp->q_core.d_rtbcount && \
+	!dqp->q_core.d_icount)
+
 /*
  * This defines the unit of allocation of dquots.
  * Currently, it is just one file system block, and a 4K blk contains 30
......
...@@ -278,7 +278,7 @@ xfs_qm_scall_trunc_qfiles(
 	xfs_mount_t	*mp,
 	uint		flags)
 {
-	int		error = 0, error2 = 0;
+	int		error;

 	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
 		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
...@@ -286,14 +286,20 @@ xfs_qm_scall_trunc_qfiles(
 		return XFS_ERROR(EINVAL);
 	}

-	if (flags & XFS_DQ_USER)
-		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
-	if (flags & XFS_DQ_GROUP)
-		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
+	if (flags & XFS_DQ_USER) {
+		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
+		if (error)
+			return error;
+	}
+	if (flags & XFS_DQ_GROUP) {
+		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
+		if (error)
+			return error;
+	}
 	if (flags & XFS_DQ_PROJ)
-		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
+		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

-	return error ? error : error2;
+	return error;
 }

 /*
......
-/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_QUOTA_PRIV_H__
-#define __XFS_QUOTA_PRIV_H__
-
-/*
- * Number of bmaps that we ask from bmapi when doing a quotacheck.
- * We make this restriction to keep the memory usage to a minimum.
- */
-#define XFS_DQITER_MAP_SIZE	10
-
-#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
-	!dqp->q_core.d_blk_hardlimit && \
-	!dqp->q_core.d_blk_softlimit && \
-	!dqp->q_core.d_rtb_hardlimit && \
-	!dqp->q_core.d_rtb_softlimit && \
-	!dqp->q_core.d_ino_hardlimit && \
-	!dqp->q_core.d_ino_softlimit && \
-	!dqp->q_core.d_bcount && \
-	!dqp->q_core.d_rtbcount && \
-	!dqp->q_core.d_icount)
-
-#define DQFLAGTO_TYPESTR(d)	(((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
-				 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
-				  (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
-
-#endif	/* __XFS_QUOTA_PRIV_H__ */
...@@ -64,7 +64,7 @@ typedef struct xfs_log_item {
 struct xfs_item_ops {
 	void (*iop_size)(xfs_log_item_t *, int *, int *);
-	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
+	void (*iop_format)(xfs_log_item_t *, struct xfs_log_vec *);
 	void (*iop_pin)(xfs_log_item_t *);
 	void (*iop_unpin)(xfs_log_item_t *, int remove);
 	uint (*iop_push)(struct xfs_log_item *, struct list_head *);
......
...@@ -295,8 +295,8 @@ xfs_trans_mod_dquot(
 /*
  * Given an array of dqtrx structures, lock all the dquots associated and join
  * them to the transaction, provided they have been modified. We know that the
- * highest number of dquots of one type - usr, grp OR prj - involved in a
- * transaction is 2 so we don't need to make this very generic.
+ * highest number of dquots of one type - usr, grp and prj - involved in a
+ * transaction is 3 so we don't need to make this very generic.
  */
 STATIC void
 xfs_trans_dqlockedjoin(
......
...@@ -174,7 +174,7 @@ xfs_calc_itruncate_reservation(
 		     xfs_calc_buf_res(5, 0) +
 		     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
 				      XFS_FSB_TO_B(mp, 1)) +
-		     xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+		     xfs_calc_buf_res(2 + mp->m_ialloc_blks +
 				      mp->m_in_maxlevels, 0)));
 }
...@@ -282,7 +282,7 @@ xfs_calc_create_resv_modify(
  * For create we can allocate some inodes giving:
  *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
  *    the superblock for the nlink flag: sector size
- *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ *    the inode blocks allocated: mp->m_ialloc_blks * blocksize
  *    the inode btree: max depth * blocksize
  *    the allocation btrees: 2 trees * (max depth - 1) * block size
  */
...@@ -292,7 +292,7 @@ xfs_calc_create_resv_alloc(
 {
 	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
 		mp->m_sb.sb_sectsize +
-		xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) +
+		xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
 		xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
 		xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
 				 XFS_FSB_TO_B(mp, 1));
...@@ -385,9 +385,9 @@ xfs_calc_ifree_reservation(
 		xfs_calc_inode_res(mp, 1) +
 		xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
 		xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
-		max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) +
+		max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size) +
 		xfs_calc_buf_res(1, 0) +
-		xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+		xfs_calc_buf_res(2 + mp->m_ialloc_blks +
 				 mp->m_in_maxlevels, 0) +
 		xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
 				 XFS_FSB_TO_B(mp, 1));
......
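These reservation formulas now read fields cached on the xfs_mount instead of re-evaluating the old XFS_IALLOC_* macros each time. Presumably the values are computed once at mount; a sketch consistent with the old macro definitions (an assumption, not shown in this diff):

/* Assumed mount-time setup mirroring the old macros (sketch). */
mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
			     sbp->sb_inopblock);
mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;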
...@@ -47,7 +47,7 @@
 #define	XFS_DIRREMOVE_SPACE_RES(mp)	\
 	XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
 #define	XFS_IALLOC_SPACE_RES(mp)	\
-	(XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1)
+	((mp)->m_ialloc_blks + (mp)->m_in_maxlevels - 1)

 /*
  * Space reservation values for various transactions.
......
...@@ -34,15 +34,6 @@ struct attrlist_cursor_kern;
 	{ IO_ISDIRECT,	"DIRECT" }, \
 	{ IO_INVIS,	"INVIS"}

-/*
- * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
- */
-#define FI_NONE			0	/* none */
-#define FI_REMAPF		1	/* Do a remapf prior to the operation */
-#define FI_REMAPF_LOCKED	2	/* Do a remapf prior to the operation.
-					   Prevent VM access to the pages until
-					   the operation completes. */
-
 /*
  * Some useful predicates.
  */
......