Commit 412dd3a6 authored by Linus Torvalds

Merge tag 'xfs-for-linus-3.16-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs updates from Dave Chinner:
 "This update contains:
   - cleanup removing unused function args
   - rework of the filestreams allocator to use dentry cache parent
     lookups
   - new on-disk free inode btree and optimised inode allocator
   - various bug fixes
   - rework of internal attribute API
   - cleanup of superblock feature bit support to remove historic cruft
   - more fixes and minor cleanups
   - added a new directory/attribute geometry abstraction
   - yet more fixes and minor cleanups"

* tag 'xfs-for-linus-3.16-rc1' of git://oss.sgi.com/xfs/xfs: (86 commits)
  xfs: fix xfs_da_args sparse warning in xfs_readdir
  xfs: Fix rounding in xfs_alloc_fix_len()
  xfs: tone down writepage/releasepage WARN_ONs
  xfs: small cleanup in xfs_lowbit64()
  xfs: kill xfs_buf_geterror()
  xfs: xfs_readsb needs to check for magic numbers
  xfs: block allocation work needs to be kswapd aware
  xfs: remove redundant geometry information from xfs_da_state
  xfs: replace attr LBSIZE with xfs_da_geometry
  xfs: pass xfs_da_args to xfs_attr_leaf_newentsize
  xfs: use xfs_da_geometry for block size in attr code
  xfs: remove mp->m_dir_geo from directory logging
  xfs: reduce direct usage of mp->m_dir_geo
  xfs: move node entry counts to xfs_da_geometry
  xfs: convert dir/attr btree threshold to xfs_da_geometry
  xfs: convert m_dirblksize to xfs_da_geometry
  xfs: convert m_dirblkfsbs to xfs_da_geometry
  xfs: convert directory segment limits to xfs_da_geometry
  xfs: convert directory db conversion to xfs_da_geometry
  xfs: convert directory dablk conversion to xfs_da_geometry
  ...
parents 23d4ed53 7691283d
......@@ -278,6 +278,17 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
return ret;
}
static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
if (!sb->s_qcop->rm_xquota)
return -ENOSYS;
return sb->s_qcop->rm_xquota(sb, flags);
}
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
void __user *addr, struct path *path)
......@@ -316,8 +327,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
return sb->s_qcop->quota_sync(sb, type);
case Q_XQUOTAON:
case Q_XQUOTAOFF:
case Q_XQUOTARM:
return quota_setxstate(sb, cmd, addr);
case Q_XQUOTARM:
return quota_rmxquota(sb, addr);
case Q_XGETQSTAT:
return quota_getxstate(sb, addr);
case Q_XGETQSTATV:
......
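For context, a minimal userspace sketch of how the Q_XQUOTARM path added above might be exercised through quotactl(2). The device path and flag combination are illustrative assumptions; the point is that the flags arrive via the addr pointer, which is why the new quota_rmxquota() copies them in with copy_from_user() rather than routing through quota_setxstate().

	#include <stdio.h>
	#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
	#include <linux/dqblk_xfs.h>	/* Q_XQUOTARM, FS_USER_QUOTA, FS_GROUP_QUOTA */

	int main(void)
	{
		/* Illustrative flags: ask XFS to free user and group quota metadata. */
		unsigned int flags = FS_USER_QUOTA | FS_GROUP_QUOTA;

		/* "/dev/sdb1" is a placeholder device; quotas must already be off. */
		if (quotactl(QCMD(Q_XQUOTARM, USRQUOTA), "/dev/sdb1", 0,
			     (char *)&flags) < 0)
			perror("Q_XQUOTARM");
		return 0;
	}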
......@@ -160,30 +160,38 @@ typedef struct xfs_agi {
* still being referenced.
*/
__be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
/*
* This marks the end of logging region 1 and start of logging region 2.
*/
uuid_t agi_uuid; /* uuid of filesystem */
__be32 agi_crc; /* crc of agi sector */
__be32 agi_pad32;
__be64 agi_lsn; /* last write sequence */
__be32 agi_free_root; /* root of the free inode btree */
__be32 agi_free_level;/* levels in free inode btree */
/* structure must be padded to 64 bit alignment */
} xfs_agi_t;
#define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc)
#define XFS_AGI_MAGICNUM 0x00000001
#define XFS_AGI_VERSIONNUM 0x00000002
#define XFS_AGI_SEQNO 0x00000004
#define XFS_AGI_LENGTH 0x00000008
#define XFS_AGI_COUNT 0x00000010
#define XFS_AGI_ROOT 0x00000020
#define XFS_AGI_LEVEL 0x00000040
#define XFS_AGI_FREECOUNT 0x00000080
#define XFS_AGI_NEWINO 0x00000100
#define XFS_AGI_DIRINO 0x00000200
#define XFS_AGI_UNLINKED 0x00000400
#define XFS_AGI_NUM_BITS 11
#define XFS_AGI_ALL_BITS ((1 << XFS_AGI_NUM_BITS) - 1)
#define XFS_AGI_MAGICNUM (1 << 0)
#define XFS_AGI_VERSIONNUM (1 << 1)
#define XFS_AGI_SEQNO (1 << 2)
#define XFS_AGI_LENGTH (1 << 3)
#define XFS_AGI_COUNT (1 << 4)
#define XFS_AGI_ROOT (1 << 5)
#define XFS_AGI_LEVEL (1 << 6)
#define XFS_AGI_FREECOUNT (1 << 7)
#define XFS_AGI_NEWINO (1 << 8)
#define XFS_AGI_DIRINO (1 << 9)
#define XFS_AGI_UNLINKED (1 << 10)
#define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */
#define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1)
#define XFS_AGI_FREE_ROOT (1 << 11)
#define XFS_AGI_FREE_LEVEL (1 << 12)
#define XFS_AGI_NUM_BITS_R2 13
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
......
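A tiny standalone illustration (not kernel code) of what the logging-region split above amounts to numerically: the original eleven AGI flag bits keep their values as region 1, and the new free inode btree fields begin at bit 11.

	#include <stdio.h>

	#define XFS_AGI_NUM_BITS_R1	11
	#define XFS_AGI_ALL_BITS_R1	((1 << XFS_AGI_NUM_BITS_R1) - 1)
	#define XFS_AGI_FREE_ROOT	(1 << 11)
	#define XFS_AGI_FREE_LEVEL	(1 << 12)

	int main(void)
	{
		printf("region 1 mask: 0x%x\n", XFS_AGI_ALL_BITS_R1);	/* 0x7ff */
		printf("region 2 bits: 0x%x\n",
		       XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);		/* 0x1800 */
		return 0;
	}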
......@@ -257,16 +257,14 @@ xfs_alloc_fix_len(
k = rlen % args->prod;
if (k == args->mod)
return;
if (k > args->mod) {
if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
return;
} else {
if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
(int)args->minlen)
if (k > args->mod)
rlen = rlen - (k - args->mod);
else
rlen = rlen - args->prod + (args->mod - k);
if ((int)rlen < (int)args->minlen)
return;
}
ASSERT(rlen >= args->minlen);
ASSERT(rlen <= args->maxlen);
ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
ASSERT(rlen % args->prod == args->mod);
args->len = rlen;
}
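A worked numeric sketch of the corrected rounding above (the values are made up): the goal is an adjusted length whose remainder modulo args->prod equals args->mod, rounding down and bailing out only if the result would fall below args->minlen.

	#include <stdio.h>

	int main(void)
	{
		unsigned int prod = 16, mod = 3;	/* assumed stripe alignment parameters */
		unsigned int rlen = 37, minlen = 8;
		unsigned int k = rlen % prod;		/* 5 */

		if (k != mod) {
			if (k > mod)
				rlen -= k - mod;		/* 37 -> 35 */
			else
				rlen -= prod - (mod - k);	/* drop a whole stripe unit */
		}
		if (rlen < minlen)
			return 1;	/* the real code returns without setting args->len */

		printf("rlen=%u, rlen %% prod=%u (== mod)\n", rlen, rlen % prod); /* 35, 3 */
		return 0;
	}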
......@@ -541,7 +539,6 @@ xfs_alloc_read_agfl(
XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
if (error)
return error;
ASSERT(!xfs_buf_geterror(bp));
xfs_buf_set_ref(bp, XFS_AGFL_REF);
*bpp = bp;
return 0;
......
......@@ -70,7 +70,6 @@ xfs_allocbt_alloc_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *start,
union xfs_btree_ptr *new,
int length,
int *stat)
{
int error;
......
......@@ -975,14 +975,39 @@ xfs_vm_writepage(
* Given that we do not allow direct reclaim to call us, we should
* never be called while in a filesystem transaction.
*/
if (WARN_ON(current->flags & PF_FSTRANS))
if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
goto redirty;
/* Is this page beyond the end of the file? */
offset = i_size_read(inode);
end_index = offset >> PAGE_CACHE_SHIFT;
last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
if (page->index >= end_index) {
/*
* The page index is less than the end_index, adjust the end_offset
* to the highest offset that this page should represent.
* -----------------------------------------------------
* | file mapping | <EOF> |
* -----------------------------------------------------
* | Page ... | Page N-2 | Page N-1 | Page N | |
* ^--------------------------------^----------|--------
* | desired writeback range | see else |
* ---------------------------------^------------------|
*/
if (page->index < end_index)
end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
else {
/*
* Check whether the page to write out is beyond or straddles
* i_size or not.
* -------------------------------------------------------
* | file mapping | <EOF> |
* -------------------------------------------------------
* | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
* ^--------------------------------^-----------|---------
* | | Straddles |
* ---------------------------------^-----------|--------|
*/
unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
/*
......@@ -990,8 +1015,20 @@ xfs_vm_writepage(
* truncate operation that is in progress. We must redirty the
* page so that reclaim stops reclaiming it. Otherwise
* xfs_vm_releasepage() is called on it and gets confused.
*
* Note that end_index is an unsigned long.  If the given offset is
* greater than 16TB on a 32-bit system and we check whether the page
* is fully outside i_size via "if (page->index >= end_index + 1)",
* then "end_index + 1" evaluates to 0.  The page would then be
* redirtied and written out repeatedly, resulting in an infinite
* loop; the user program performing the operation would hang.
* Instead, verify this situation by checking whether the page to
* write is totally beyond i_size or whether its offset is just
* equal to the EOF.
*/
if (page->index >= end_index + 1 || offset_into_page == 0)
if (page->index > end_index ||
(page->index == end_index && offset_into_page == 0))
goto redirty;
/*
......@@ -1003,11 +1040,11 @@ xfs_vm_writepage(
* not written out to the file."
*/
zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
/* Adjust the end_offset to the end of file */
end_offset = offset;
}
end_offset = min_t(unsigned long long,
(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
offset);
len = 1 << inode->i_blkbits;
bh = head = page_buffers(page);
......@@ -1188,9 +1225,9 @@ xfs_vm_releasepage(
xfs_count_page_state(page, &delalloc, &unwritten);
if (WARN_ON(delalloc))
if (WARN_ON_ONCE(delalloc))
return 0;
if (WARN_ON(unwritten))
if (WARN_ON_ONCE(unwritten))
return 0;
return try_to_free_buffers(page);
......
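The long comment above about "end_index + 1" wrapping to zero can be reproduced with a few lines of userspace arithmetic; uint32_t stands in for the 32-bit kernel's unsigned long page index and a 4k page size (PAGE_CACHE_SHIFT of 12) is assumed.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t isize = (1ULL << 44) - 1;	/* i_size just shy of 16TB */
		uint32_t end_index = isize >> 12;	/* last page index: 0xffffffff */

		/* Old check: page->index >= end_index + 1.  The sum wraps to 0, so no
		 * page ever appears to be beyond EOF and writepage loops forever. */
		printf("end_index     = 0x%08x\n", end_index);
		printf("end_index + 1 = 0x%08x\n", (uint32_t)(end_index + 1));

		/* New check: page->index > end_index, with offset_into_page == 0 as the
		 * EOF-boundary special case, never needs the overflowing addition. */
		return 0;
	}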
......@@ -96,8 +96,7 @@ int xfs_attr3_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp);
int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
int *local);
int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local);
int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
struct xfs_buf **bpp);
......
......@@ -444,6 +444,7 @@ xfs_attr3_leaf_list_int(
xfs_da_args_t args;
memset((char *)&args, 0, sizeof(args));
args.geo = context->dp->i_mount->m_attr_geo;
args.dp = context->dp;
args.whichfork = XFS_ATTR_FORK;
args.valuelen = valuelen;
......
......@@ -68,7 +68,6 @@ xfs_attr3_rmt_blocks(
*/
static bool
xfs_attr3_rmt_hdr_ok(
struct xfs_mount *mp,
void *ptr,
xfs_ino_t ino,
uint32_t offset,
......@@ -126,6 +125,7 @@ xfs_attr3_rmt_read_verify(
char *ptr;
int len;
xfs_daddr_t bno;
int blksize = mp->m_attr_geo->blksize;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
......@@ -134,21 +134,20 @@ xfs_attr3_rmt_read_verify(
ptr = bp->b_addr;
bno = bp->b_bn;
len = BBTOB(bp->b_length);
ASSERT(len >= XFS_LBSIZE(mp));
ASSERT(len >= blksize);
while (len > 0) {
if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
XFS_ATTR3_RMT_CRC_OFF)) {
if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
xfs_buf_ioerror(bp, EFSBADCRC);
break;
}
if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
xfs_buf_ioerror(bp, EFSCORRUPTED);
break;
}
len -= XFS_LBSIZE(mp);
ptr += XFS_LBSIZE(mp);
bno += mp->m_bsize;
len -= blksize;
ptr += blksize;
bno += BTOBB(blksize);
}
if (bp->b_error)
......@@ -166,6 +165,7 @@ xfs_attr3_rmt_write_verify(
char *ptr;
int len;
xfs_daddr_t bno;
int blksize = mp->m_attr_geo->blksize;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
......@@ -174,10 +174,10 @@ xfs_attr3_rmt_write_verify(
ptr = bp->b_addr;
bno = bp->b_bn;
len = BBTOB(bp->b_length);
ASSERT(len >= XFS_LBSIZE(mp));
ASSERT(len >= blksize);
while (len > 0) {
if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
xfs_buf_ioerror(bp, EFSCORRUPTED);
xfs_verifier_error(bp);
return;
......@@ -188,11 +188,11 @@ xfs_attr3_rmt_write_verify(
rmt = (struct xfs_attr3_rmt_hdr *)ptr;
rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
}
xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
len -= XFS_LBSIZE(mp);
ptr += XFS_LBSIZE(mp);
bno += mp->m_bsize;
len -= blksize;
ptr += blksize;
bno += BTOBB(blksize);
}
ASSERT(len == 0);
}
......@@ -241,17 +241,18 @@ xfs_attr_rmtval_copyout(
char *src = bp->b_addr;
xfs_daddr_t bno = bp->b_bn;
int len = BBTOB(bp->b_length);
int blksize = mp->m_attr_geo->blksize;
ASSERT(len >= XFS_LBSIZE(mp));
ASSERT(len >= blksize);
while (len > 0 && *valuelen > 0) {
int hdr_size = 0;
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
byte_cnt = min(*valuelen, byte_cnt);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
if (!xfs_attr3_rmt_hdr_ok(src, ino, *offset,
byte_cnt, bno)) {
xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
......@@ -264,9 +265,9 @@ xfs_attr_rmtval_copyout(
memcpy(*dst, src + hdr_size, byte_cnt);
/* roll buffer forwards */
len -= XFS_LBSIZE(mp);
src += XFS_LBSIZE(mp);
bno += mp->m_bsize;
len -= blksize;
src += blksize;
bno += BTOBB(blksize);
/* roll attribute data forwards */
*valuelen -= byte_cnt;
......@@ -288,12 +289,13 @@ xfs_attr_rmtval_copyin(
char *dst = bp->b_addr;
xfs_daddr_t bno = bp->b_bn;
int len = BBTOB(bp->b_length);
int blksize = mp->m_attr_geo->blksize;
ASSERT(len >= XFS_LBSIZE(mp));
ASSERT(len >= blksize);
while (len > 0 && *valuelen > 0) {
int hdr_size;
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
byte_cnt = min(*valuelen, byte_cnt);
hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
......@@ -305,17 +307,17 @@ xfs_attr_rmtval_copyin(
* If this is the last block, zero the remainder of it.
* Check that we are actually the last block, too.
*/
if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
if (byte_cnt + hdr_size < blksize) {
ASSERT(*valuelen - byte_cnt == 0);
ASSERT(len == XFS_LBSIZE(mp));
ASSERT(len == blksize);
memset(dst + hdr_size + byte_cnt, 0,
XFS_LBSIZE(mp) - hdr_size - byte_cnt);
blksize - hdr_size - byte_cnt);
}
/* roll buffer forwards */
len -= XFS_LBSIZE(mp);
dst += XFS_LBSIZE(mp);
bno += mp->m_bsize;
len -= blksize;
dst += blksize;
bno += BTOBB(blksize);
/* roll attribute data forwards */
*valuelen -= byte_cnt;
......
......@@ -66,9 +66,12 @@ static inline int xfs_lowbit64(__uint64_t v)
n = ffs(w);
} else { /* upper bits */
w = (__uint32_t)(v >> 32);
if (w && (n = ffs(w)))
if (w) {
n = ffs(w);
if (n)
n += 32;
}
}
return n - 1;
}
......
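For reference, a userspace re-implementation of the cleaned-up xfs_lowbit64() logic above, using the libc ffs() in place of the kernel helper; it is only a sketch to confirm the expected results for a couple of inputs.

	#include <stdio.h>
	#include <stdint.h>
	#include <strings.h>	/* ffs() */

	static int lowbit64(uint64_t v)
	{
		uint32_t w = (uint32_t)v;
		int n = 0;

		if (w) {			/* lower bits */
			n = ffs(w);
		} else {			/* upper bits */
			w = (uint32_t)(v >> 32);
			if (w) {
				n = ffs(w);
				if (n)
					n += 32;
			}
		}
		return n - 1;
	}

	int main(void)
	{
		printf("%d\n", lowbit64(0x0000001000000000ULL));	/* bit 36 -> prints 36 */
		printf("%d\n", lowbit64(0));				/* no bits -> prints -1 */
		return 0;
	}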
......@@ -94,7 +94,7 @@ xfs_bmap_compute_maxlevels(
maxleafents = MAXAEXTNUM;
sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
}
maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
minleafrecs = mp->m_bmap_dmnr[0];
minnoderecs = mp->m_bmap_dmnr[1];
maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
......@@ -233,7 +233,6 @@ xfs_default_attroffset(
*/
STATIC void
xfs_bmap_forkoff_reset(
xfs_mount_t *mp,
xfs_inode_t *ip,
int whichfork)
{
......@@ -905,7 +904,7 @@ xfs_bmap_local_to_extents_empty(
ASSERT(ifp->if_bytes == 0);
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
xfs_bmap_forkoff_reset(ip, whichfork);
ifp->if_flags &= ~XFS_IFINLINE;
ifp->if_flags |= XFS_IFEXTENTS;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
......@@ -1099,10 +1098,11 @@ xfs_bmap_add_attrfork_local(
if (S_ISDIR(ip->i_d.di_mode)) {
memset(&dargs, 0, sizeof(dargs));
dargs.geo = ip->i_mount->m_dir_geo;
dargs.dp = ip;
dargs.firstblock = firstblock;
dargs.flist = flist;
dargs.total = ip->i_mount->m_dirblkfsbs;
dargs.total = dargs.geo->fsbcount;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
return xfs_dir2_sf_to_block(&dargs);
......@@ -1675,7 +1675,6 @@ xfs_bmap_isaeof(
*/
int
xfs_bmap_last_offset(
struct xfs_trans *tp,
struct xfs_inode *ip,
xfs_fileoff_t *last_block,
int whichfork)
......@@ -3517,6 +3516,67 @@ xfs_bmap_adjacent(
#undef ISVALID
}
static int
xfs_bmap_longest_free_extent(
struct xfs_trans *tp,
xfs_agnumber_t ag,
xfs_extlen_t *blen,
int *notinit)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_perag *pag;
xfs_extlen_t longest;
int error = 0;
pag = xfs_perag_get(mp, ag);
if (!pag->pagf_init) {
error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
if (error)
goto out;
if (!pag->pagf_init) {
*notinit = 1;
goto out;
}
}
longest = xfs_alloc_longest_free_extent(mp, pag);
if (*blen < longest)
*blen = longest;
out:
xfs_perag_put(pag);
return error;
}
static void
xfs_bmap_select_minlen(
struct xfs_bmalloca *ap,
struct xfs_alloc_arg *args,
xfs_extlen_t *blen,
int notinit)
{
if (notinit || *blen < ap->minlen) {
/*
* Since we did a BUF_TRYLOCK above, it is possible that
* there is space for this request.
*/
args->minlen = ap->minlen;
} else if (*blen < args->maxlen) {
/*
* If the best seen length is less than the request length,
* use the best as the minimum.
*/
args->minlen = *blen;
} else {
/*
* Otherwise we've seen an extent as big as maxlen, use that
* as the minimum.
*/
args->minlen = args->maxlen;
}
}
STATIC int
xfs_bmap_btalloc_nullfb(
struct xfs_bmalloca *ap,
......@@ -3524,111 +3584,74 @@ xfs_bmap_btalloc_nullfb(
xfs_extlen_t *blen)
{
struct xfs_mount *mp = ap->ip->i_mount;
struct xfs_perag *pag;
xfs_agnumber_t ag, startag;
int notinit = 0;
int error;
if (ap->userdata && xfs_inode_is_filestream(ap->ip))
args->type = XFS_ALLOCTYPE_NEAR_BNO;
else
args->type = XFS_ALLOCTYPE_START_BNO;
args->total = ap->total;
/*
* Search for an allocation group with a single extent large enough
* for the request. If one isn't found, then adjust the minimum
* allocation size to the largest space found.
*/
startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
if (startag == NULLAGNUMBER)
startag = ag = 0;
pag = xfs_perag_get(mp, ag);
while (*blen < args->maxlen) {
if (!pag->pagf_init) {
error = xfs_alloc_pagf_init(mp, args->tp, ag,
XFS_ALLOC_FLAG_TRYLOCK);
if (error) {
xfs_perag_put(pag);
error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
&notinit);
if (error)
return error;
}
if (++ag == mp->m_sb.sb_agcount)
ag = 0;
if (ag == startag)
break;
}
/*
* See xfs_alloc_fix_freelist...
*/
if (pag->pagf_init) {
xfs_extlen_t longest;
longest = xfs_alloc_longest_free_extent(mp, pag);
if (*blen < longest)
*blen = longest;
} else
notinit = 1;
xfs_bmap_select_minlen(ap, args, blen, notinit);
return 0;
}
if (xfs_inode_is_filestream(ap->ip)) {
if (*blen >= args->maxlen)
break;
STATIC int
xfs_bmap_btalloc_filestreams(
struct xfs_bmalloca *ap,
struct xfs_alloc_arg *args,
xfs_extlen_t *blen)
{
struct xfs_mount *mp = ap->ip->i_mount;
xfs_agnumber_t ag;
int notinit = 0;
int error;
if (ap->userdata) {
/*
* If startag is an invalid AG, we've
* come here once before and
* xfs_filestream_new_ag picked the
* best currently available.
*
* Don't continue looping, since we
* could loop forever.
*/
if (startag == NULLAGNUMBER)
break;
args->type = XFS_ALLOCTYPE_NEAR_BNO;
args->total = ap->total;
ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
if (ag == NULLAGNUMBER)
ag = 0;
error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
if (error)
return error;
if (*blen < args->maxlen) {
error = xfs_filestream_new_ag(ap, &ag);
xfs_perag_put(pag);
if (error)
return error;
/* loop again to set 'blen'*/
startag = NULLAGNUMBER;
pag = xfs_perag_get(mp, ag);
continue;
}
}
if (++ag == mp->m_sb.sb_agcount)
ag = 0;
if (ag == startag)
break;
xfs_perag_put(pag);
pag = xfs_perag_get(mp, ag);
error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
&notinit);
if (error)
return error;
}
xfs_perag_put(pag);
/*
* Since the above loop did a BUF_TRYLOCK, it is
* possible that there is space for this request.
*/
if (notinit || *blen < ap->minlen)
args->minlen = ap->minlen;
/*
* If the best seen length is less than the request
* length, use the best as the minimum.
*/
else if (*blen < args->maxlen)
args->minlen = *blen;
/*
* Otherwise we've seen an extent as big as maxlen,
* use that as the minimum.
*/
else
args->minlen = args->maxlen;
xfs_bmap_select_minlen(ap, args, blen, notinit);
/*
* set the failure fallback case to look in the selected
* AG as the stream may have moved.
* Set the failure fallback case to look in the selected AG as stream
* may have moved.
*/
if (xfs_inode_is_filestream(ap->ip))
ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
return 0;
}
......@@ -3708,6 +3731,14 @@ xfs_bmap_btalloc(
args.firstblock = *ap->firstblock;
blen = 0;
if (nullfb) {
/*
* Search for an allocation group with a single extent large
* enough for the request. If one isn't found, then adjust
* the minimum allocation size to the largest space found.
*/
if (ap->userdata && xfs_inode_is_filestream(ap->ip))
error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
else
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
if (error)
return error;
......
......@@ -156,8 +156,8 @@ int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
int xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *last_block, int whichfork);
int xfs_bmap_last_offset(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *unused, int whichfork);
int xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
int whichfork);
int xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
int xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork);
......
......@@ -84,7 +84,7 @@ xfs_bmdr_to_bmbt(
rblock->bb_level = dblock->bb_level;
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs;
dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
......@@ -443,7 +443,7 @@ xfs_bmbt_to_bmdr(
ASSERT(rblock->bb_level != 0);
dblock->bb_level = rblock->bb_level;
dblock->bb_numrecs = rblock->bb_numrecs;
dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
......@@ -519,7 +519,6 @@ xfs_bmbt_alloc_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *start,
union xfs_btree_ptr *new,
int length,
int *stat)
{
xfs_alloc_arg_t args; /* block allocation args */
......@@ -672,8 +671,7 @@ xfs_bmbt_get_dmaxrecs(
{
if (level != cur->bc_nlevels - 1)
return cur->bc_mp->m_bmap_dmxr[level != 0];
return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize,
level == 0);
return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}
STATIC void
......@@ -914,7 +912,6 @@ xfs_bmbt_maxrecs(
*/
int
xfs_bmdr_maxrecs(
struct xfs_mount *mp,
int blocklen,
int leaf)
{
......
......@@ -130,7 +130,7 @@ extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
xfs_bmdr_block_t *, int);
extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level);
extern int xfs_bmdr_maxrecs(struct xfs_mount *, int blocklen, int leaf);
extern int xfs_bmdr_maxrecs(int blocklen, int leaf);
extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf);
extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
......
......@@ -258,14 +258,23 @@ xfs_bmapi_allocate_worker(
struct xfs_bmalloca *args = container_of(work,
struct xfs_bmalloca, work);
unsigned long pflags;
unsigned long new_pflags = PF_FSTRANS;
/* we are in a transaction context here */
current_set_flags_nested(&pflags, PF_FSTRANS);
/*
* we are in a transaction context here, but may also be doing work
* in kswapd context, and hence we may need to inherit that state
* temporarily to ensure that we don't block waiting for memory reclaim
* in any way.
*/
if (args->kswapd)
new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
current_set_flags_nested(&pflags, new_pflags);
args->result = __xfs_bmapi_allocate(args);
complete(args->done);
current_restore_flags_nested(&pflags, PF_FSTRANS);
current_restore_flags_nested(&pflags, new_pflags);
}
/*
......@@ -284,6 +293,7 @@ xfs_bmapi_allocate(
args->done = &done;
args->kswapd = current_is_kswapd();
INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
queue_work(xfs_alloc_wq, &args->work);
wait_for_completion(&done);
......@@ -1519,7 +1529,6 @@ xfs_collapse_file_space(
while (!error && !done) {
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
tp->t_flags |= XFS_TRANS_RESERVE;
/*
* We would need to reserve permanent block for transaction.
* This will come into picture when after shifting extent into
......@@ -1529,7 +1538,6 @@ xfs_collapse_file_space(
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
if (error) {
ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
break;
}
......
......@@ -50,12 +50,13 @@ struct xfs_bmalloca {
xfs_extlen_t total; /* total blocks needed for xaction */
xfs_extlen_t minlen; /* minimum allocation size (blocks) */
xfs_extlen_t minleft; /* amount must be left after alloc */
char eof; /* set if allocating past last extent */
char wasdel; /* replacing a delayed allocation */
char userdata;/* set if is user data */
char aeof; /* allocated space at eof */
char conv; /* overwriting unwritten extents */
char stack_switch;
bool eof; /* set if allocating past last extent */
bool wasdel; /* replacing a delayed allocation */
bool userdata;/* set if is user data */
bool aeof; /* allocated space at eof */
bool conv; /* overwriting unwritten extents */
bool stack_switch;
bool kswapd; /* allocation in kswapd context */
int flags;
struct completion *done;
struct work_struct work;
......
......@@ -43,9 +43,10 @@ kmem_zone_t *xfs_btree_cur_zone;
* Btree magic numbers.
*/
static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC },
{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
XFS_FIBT_MAGIC },
{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC }
XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
};
#define xfs_btree_magic(cur) \
xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
......@@ -552,14 +553,11 @@ xfs_btree_get_bufl(
xfs_fsblock_t fsbno, /* file system block number */
uint lock) /* lock flags for get_buf */
{
xfs_buf_t *bp; /* buffer pointer (return value) */
xfs_daddr_t d; /* real disk block address */
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
ASSERT(!xfs_buf_geterror(bp));
return bp;
return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}
/*
......@@ -574,15 +572,12 @@ xfs_btree_get_bufs(
xfs_agblock_t agbno, /* allocation group block number */
uint lock) /* lock flags for get_buf */
{
xfs_buf_t *bp; /* buffer pointer (return value) */
xfs_daddr_t d; /* real disk block address */
ASSERT(agno != NULLAGNUMBER);
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
ASSERT(!xfs_buf_geterror(bp));
return bp;
return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}
/*
......@@ -722,7 +717,6 @@ xfs_btree_read_bufl(
mp->m_bsize, lock, &bp, ops);
if (error)
return error;
ASSERT(!xfs_buf_geterror(bp));
if (bp)
xfs_buf_set_ref(bp, refval);
*bpp = bp;
......@@ -1115,6 +1109,7 @@ xfs_btree_set_refs(
xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
break;
case XFS_BTNUM_INO:
case XFS_BTNUM_FINO:
xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
break;
case XFS_BTNUM_BMAP:
......@@ -1159,7 +1154,6 @@ STATIC int
xfs_btree_read_buf_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr,
int level,
int flags,
struct xfs_btree_block **block,
struct xfs_buf **bpp)
......@@ -1178,7 +1172,6 @@ xfs_btree_read_buf_block(
if (error)
return error;
ASSERT(!xfs_buf_geterror(*bpp));
xfs_btree_set_refs(cur, *bpp);
*block = XFS_BUF_TO_BLOCK(*bpp);
return 0;
......@@ -1517,8 +1510,8 @@ xfs_btree_increment(
union xfs_btree_ptr *ptrp;
ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
error = xfs_btree_read_buf_block(cur, ptrp, --lev,
0, &block, &bp);
--lev;
error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
if (error)
goto error0;
......@@ -1616,8 +1609,8 @@ xfs_btree_decrement(
union xfs_btree_ptr *ptrp;
ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
error = xfs_btree_read_buf_block(cur, ptrp, --lev,
0, &block, &bp);
--lev;
error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
if (error)
goto error0;
xfs_btree_setbuf(cur, lev, bp);
......@@ -1667,7 +1660,7 @@ xfs_btree_lookup_get_block(
return 0;
}
error = xfs_btree_read_buf_block(cur, pp, level, 0, blkp, &bp);
error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
if (error)
return error;
......@@ -2018,7 +2011,7 @@ xfs_btree_lshift(
goto out0;
/* Set up the left neighbor as "left". */
error = xfs_btree_read_buf_block(cur, &lptr, level, 0, &left, &lbp);
error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
if (error)
goto error0;
......@@ -2202,7 +2195,7 @@ xfs_btree_rshift(
goto out0;
/* Set up the right neighbor as "right". */
error = xfs_btree_read_buf_block(cur, &rptr, level, 0, &right, &rbp);
error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
if (error)
goto error0;
......@@ -2372,7 +2365,7 @@ xfs_btree_split(
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, 1, stat);
error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
if (error)
goto error0;
if (*stat == 0)
......@@ -2470,7 +2463,7 @@ xfs_btree_split(
* point back to right instead of to left.
*/
if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
error = xfs_btree_read_buf_block(cur, &rrptr, level,
error = xfs_btree_read_buf_block(cur, &rrptr,
0, &rrblock, &rrbp);
if (error)
goto error0;
......@@ -2545,7 +2538,7 @@ xfs_btree_new_iroot(
pp = xfs_btree_ptr_addr(cur, 1, block);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, pp, &nptr, 1, stat);
error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
if (error)
goto error0;
if (*stat == 0) {
......@@ -2649,7 +2642,7 @@ xfs_btree_new_root(
cur->bc_ops->init_ptr_from_cur(cur, &rptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, 1, stat);
error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
if (error)
goto error0;
if (*stat == 0)
......@@ -2684,8 +2677,7 @@ xfs_btree_new_root(
lbp = bp;
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
left = block;
error = xfs_btree_read_buf_block(cur, &rptr,
cur->bc_nlevels - 1, 0, &right, &rbp);
error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
if (error)
goto error0;
bp = rbp;
......@@ -2696,8 +2688,7 @@ xfs_btree_new_root(
xfs_btree_buf_to_ptr(cur, rbp, &rptr);
right = block;
xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
error = xfs_btree_read_buf_block(cur, &lptr,
cur->bc_nlevels - 1, 0, &left, &lbp);
error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
if (error)
goto error0;
bp = lbp;
......@@ -3649,8 +3640,7 @@ xfs_btree_delrec(
rptr = cptr;
right = block;
rbp = bp;
error = xfs_btree_read_buf_block(cur, &lptr, level,
0, &left, &lbp);
error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
if (error)
goto error0;
......@@ -3667,8 +3657,7 @@ xfs_btree_delrec(
lptr = cptr;
left = block;
lbp = bp;
error = xfs_btree_read_buf_block(cur, &rptr, level,
0, &right, &rbp);
error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
if (error)
goto error0;
......@@ -3740,8 +3729,7 @@ xfs_btree_delrec(
/* If there is a right sibling, point it to the remaining block. */
xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
if (!xfs_btree_ptr_is_null(cur, &cptr)) {
error = xfs_btree_read_buf_block(cur, &cptr, level,
0, &rrblock, &rrbp);
error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
if (error)
goto error0;
xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
......
......@@ -62,6 +62,7 @@ union xfs_btree_rec {
#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi)
#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi)
/*
* For logging record fields.
......@@ -92,6 +93,7 @@ do { \
case XFS_BTNUM_CNT: __XFS_BTREE_STATS_INC(abtc, stat); break; \
case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(bmbt, stat); break; \
case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(ibt, stat); break; \
case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(fibt, stat); break; \
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
......@@ -105,6 +107,7 @@ do { \
case XFS_BTNUM_CNT: __XFS_BTREE_STATS_ADD(abtc, stat, val); break; \
case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_ADD(bmbt, stat, val); break; \
case XFS_BTNUM_INO: __XFS_BTREE_STATS_ADD(ibt, stat, val); break; \
case XFS_BTNUM_FINO: __XFS_BTREE_STATS_ADD(fibt, stat, val); break; \
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
......@@ -129,7 +132,7 @@ struct xfs_btree_ops {
int (*alloc_block)(struct xfs_btree_cur *cur,
union xfs_btree_ptr *start_bno,
union xfs_btree_ptr *new_bno,
int length, int *stat);
int *stat);
int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
/* update last record information */
......
......@@ -216,8 +216,7 @@ _xfs_buf_alloc(
STATIC int
_xfs_buf_get_pages(
xfs_buf_t *bp,
int page_count,
xfs_buf_flags_t flags)
int page_count)
{
/* Make sure that we have a page list */
if (bp->b_pages == NULL) {
......@@ -330,7 +329,7 @@ xfs_buf_allocate_memory(
end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
>> PAGE_SHIFT;
page_count = end - start;
error = _xfs_buf_get_pages(bp, page_count, flags);
error = _xfs_buf_get_pages(bp, page_count);
if (unlikely(error))
return error;
......@@ -778,7 +777,7 @@ xfs_buf_associate_memory(
bp->b_pages = NULL;
bp->b_addr = mem;
rval = _xfs_buf_get_pages(bp, page_count, 0);
rval = _xfs_buf_get_pages(bp, page_count);
if (rval)
return rval;
......@@ -811,7 +810,7 @@ xfs_buf_get_uncached(
goto fail;
page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
error = _xfs_buf_get_pages(bp, page_count, 0);
error = _xfs_buf_get_pages(bp, page_count);
if (error)
goto fail_free_buf;
......@@ -1615,7 +1614,6 @@ xfs_free_buftarg(
int
xfs_setsize_buftarg(
xfs_buftarg_t *btp,
unsigned int blocksize,
unsigned int sectorsize)
{
/* Set up metadata sector size info */
......@@ -1650,16 +1648,13 @@ xfs_setsize_buftarg_early(
xfs_buftarg_t *btp,
struct block_device *bdev)
{
return xfs_setsize_buftarg(btp, PAGE_SIZE,
bdev_logical_block_size(bdev));
return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}
xfs_buftarg_t *
xfs_alloc_buftarg(
struct xfs_mount *mp,
struct block_device *bdev,
int external,
const char *fsname)
struct block_device *bdev)
{
xfs_buftarg_t *btp;
......
......@@ -298,11 +298,6 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
extern int xfs_bioerror_relse(struct xfs_buf *);
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
return bp ? bp->b_error : ENOMEM;
}
/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
......@@ -387,10 +382,10 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
* Handling of buftargs.
*/
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
struct block_device *, int, const char *);
struct block_device *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
......
......@@ -812,7 +812,6 @@ xfs_buf_item_init(
*/
static void
xfs_buf_item_log_segment(
struct xfs_buf_log_item *bip,
uint first,
uint last,
uint *map)
......@@ -920,7 +919,7 @@ xfs_buf_item_log(
if (end > last)
end = last;
xfs_buf_item_log_segment(bip, first, end,
xfs_buf_item_log_segment(first, end,
&bip->bli_formats[i].blf_data_map[0]);
start += bp->b_maps[i].bm_len;
......@@ -1053,7 +1052,7 @@ xfs_buf_iodone_callbacks(
static ulong lasttime;
static xfs_buftarg_t *lasttarg;
if (likely(!xfs_buf_geterror(bp)))
if (likely(!bp->b_error))
goto do_callbacks;
/*
......
......@@ -167,8 +167,8 @@ xfs_da3_node_verify(
* we don't know if the node is for an attribute or directory tree,
* so only fail if the count is outside both bounds
*/
if (ichdr.count > mp->m_dir_node_ents &&
ichdr.count > mp->m_attr_node_ents)
if (ichdr.count > mp->m_dir_geo->node_ents &&
ichdr.count > mp->m_attr_geo->node_ents)
return false;
/* XXX: hash order check? */
......@@ -598,7 +598,7 @@ xfs_da3_root_split(
* Set up the new root node.
*/
error = xfs_da3_node_create(args,
(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
level + 1, &bp, args->whichfork);
if (error)
return error;
......@@ -616,10 +616,10 @@ xfs_da3_root_split(
#ifdef DEBUG
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
ASSERT(blk1->blkno >= mp->m_dirleafblk &&
blk1->blkno < mp->m_dirfreeblk);
ASSERT(blk2->blkno >= mp->m_dirleafblk &&
blk2->blkno < mp->m_dirfreeblk);
ASSERT(blk1->blkno >= args->geo->leafblk &&
blk1->blkno < args->geo->freeblk);
ASSERT(blk2->blkno >= args->geo->leafblk &&
blk2->blkno < args->geo->freeblk);
}
#endif
......@@ -663,7 +663,7 @@ xfs_da3_node_split(
/*
* Do we have to split the node?
*/
if (nodehdr.count + newcount > state->node_ents) {
if (nodehdr.count + newcount > state->args->geo->node_ents) {
/*
* Allocate a new node, add to the doubly linked chain of
* nodes, then move some of our excess entries into it.
......@@ -894,8 +894,8 @@ xfs_da3_node_add(
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
if (state->args->whichfork == XFS_DATA_FORK)
ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
newblk->blkno < state->mp->m_dirfreeblk);
ASSERT(newblk->blkno >= state->args->geo->leafblk &&
newblk->blkno < state->args->geo->freeblk);
/*
* We may need to make some room before we insert the new node.
......@@ -1089,14 +1089,15 @@ xfs_da3_root_join(
* that could occur. For dir3 blocks we also need to update the block
* number in the buffer header.
*/
memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
root_blk->bp->b_ops = bp->b_ops;
xfs_trans_buf_copy_type(root_blk->bp, bp);
if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
}
xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
xfs_trans_log_buf(args->trans, root_blk->bp, 0,
args->geo->blksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
}
......@@ -1139,7 +1140,7 @@ xfs_da3_node_toosmall(
info = blk->bp->b_addr;
node = (xfs_da_intnode_t *)info;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (nodehdr.count > (state->node_ents >> 1)) {
if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return(0); /* blk over 50%, don't try to join */
}
......@@ -1176,8 +1177,8 @@ xfs_da3_node_toosmall(
* We prefer coalescing with the lower numbered sibling so as
* to shrink a directory over time.
*/
count = state->node_ents;
count -= state->node_ents >> 2;
count = state->args->geo->node_ents;
count -= state->args->geo->node_ents >> 2;
count -= nodehdr.count;
/* start with smaller blk num */
......@@ -1472,7 +1473,7 @@ xfs_da3_node_lookup_int(
* Descend thru the B-tree searching each level for the right
* node to use, until the right hashval is found.
*/
blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
for (blk = &state->path.blk[0], state->path.active = 1;
state->path.active <= XFS_DA_NODE_MAXDEPTH;
blk++, state->path.active++) {
......@@ -2090,20 +2091,12 @@ xfs_da_grow_inode(
xfs_dablk_t *new_blkno)
{
xfs_fileoff_t bno;
int count;
int error;
trace_xfs_da_grow_inode(args);
if (args->whichfork == XFS_DATA_FORK) {
bno = args->dp->i_mount->m_dirleafblk;
count = args->dp->i_mount->m_dirblkfsbs;
} else {
bno = 0;
count = 1;
}
error = xfs_da_grow_inode_int(args, &bno, count);
bno = args->geo->leafblk;
error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
if (!error)
*new_blkno = (xfs_dablk_t)bno;
return error;
......@@ -2158,7 +2151,7 @@ xfs_da3_swap_lastblock(
w = args->whichfork;
ASSERT(w == XFS_DATA_FORK);
mp = dp->i_mount;
lastoff = mp->m_dirfreeblk;
lastoff = args->geo->freeblk;
error = xfs_bmap_last_before(tp, dp, &lastoff, w);
if (error)
return error;
......@@ -2170,15 +2163,15 @@ xfs_da3_swap_lastblock(
/*
* Read the last block in the btree space.
*/
last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
if (error)
return error;
/*
* Copy the last block into the dead buffer and log it.
*/
memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
dead_info = dead_buf->b_addr;
/*
* Get values from the moved block.
......@@ -2247,7 +2240,7 @@ xfs_da3_swap_lastblock(
sizeof(sib_info->back)));
sib_buf = NULL;
}
par_blkno = mp->m_dirleafblk;
par_blkno = args->geo->leafblk;
level = -1;
/*
* Walk down the tree looking for the parent of the moved block.
......@@ -2357,10 +2350,7 @@ xfs_da_shrink_inode(
w = args->whichfork;
tp = args->trans;
mp = dp->i_mount;
if (w == XFS_DATA_FORK)
count = mp->m_dirblkfsbs;
else
count = 1;
count = args->geo->fsbcount;
for (;;) {
/*
* Remove extents. If we get ENOSPC for a dir we have to move
......@@ -2462,7 +2452,6 @@ xfs_buf_map_from_irec(
*/
static int
xfs_dabuf_map(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
......@@ -2480,7 +2469,10 @@ xfs_dabuf_map(
ASSERT(map && *map);
ASSERT(*nmaps == 1);
nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
if (whichfork == XFS_DATA_FORK)
nfsb = mp->m_dir_geo->fsbcount;
else
nfsb = mp->m_attr_geo->fsbcount;
/*
* Caller doesn't have a mapping. -2 means don't complain
......@@ -2558,7 +2550,7 @@ xfs_da_get_buf(
*bpp = NULL;
mapp = &map;
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
......@@ -2606,7 +2598,7 @@ xfs_da_read_buf(
*bpp = NULL;
mapp = &map;
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
......@@ -2625,47 +2617,6 @@ xfs_da_read_buf(
xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
else
xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
/*
* This verification code will be moved to a CRC verification callback
* function so just leave it here unchanged until then.
*/
{
xfs_dir2_data_hdr_t *hdr = bp->b_addr;
xfs_dir2_free_t *free = bp->b_addr;
xfs_da_blkinfo_t *info = bp->b_addr;
uint magic, magic1;
struct xfs_mount *mp = dp->i_mount;
magic = be16_to_cpu(info->magic);
magic1 = be32_to_cpu(hdr->magic);
if (unlikely(
XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
(magic != XFS_DA3_NODE_MAGIC) &&
(magic != XFS_ATTR_LEAF_MAGIC) &&
(magic != XFS_ATTR3_LEAF_MAGIC) &&
(magic != XFS_DIR2_LEAF1_MAGIC) &&
(magic != XFS_DIR3_LEAF1_MAGIC) &&
(magic != XFS_DIR2_LEAFN_MAGIC) &&
(magic != XFS_DIR3_LEAFN_MAGIC) &&
(magic1 != XFS_DIR2_BLOCK_MAGIC) &&
(magic1 != XFS_DIR3_BLOCK_MAGIC) &&
(magic1 != XFS_DIR2_DATA_MAGIC) &&
(magic1 != XFS_DIR3_DATA_MAGIC) &&
(free->hdr.magic !=
cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
(free->hdr.magic !=
cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
trace_xfs_da_btree_corrupt(bp, _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
xfs_trans_brelse(trans, bp);
goto out_free;
}
}
*bpp = bp;
out_free:
if (mapp != &map)
......@@ -2679,7 +2630,6 @@ xfs_da_read_buf(
*/
xfs_daddr_t
xfs_da_reada_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
......@@ -2693,7 +2643,7 @@ xfs_da_reada_buf(
mapp = &map;
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
......
......@@ -25,6 +25,23 @@ struct xfs_trans;
struct zone;
struct xfs_dir_ops;
/*
* Directory/attribute geometry information. There will be one of these for each
* data fork type, and it will be passed around via the xfs_da_args. Global
* structures will be attached to the xfs_mount.
*/
struct xfs_da_geometry {
int blksize; /* da block size in bytes */
int fsbcount; /* da block size in filesystem blocks */
uint8_t fsblog; /* log2 of _filesystem_ block size */
uint8_t blklog; /* log2 of da block size */
uint node_ents; /* # of entries in a danode */
int magicpct; /* 37% of block size in bytes */
xfs_dablk_t datablk; /* blockno of dir data v2 */
xfs_dablk_t leafblk; /* blockno of leaf data v2 */
xfs_dablk_t freeblk; /* blockno of free data v2 */
};
/*========================================================================
* Btree searching and modification structure definitions.
*========================================================================*/
......@@ -42,6 +59,7 @@ enum xfs_dacmp {
* Structure to ease passing around component names.
*/
typedef struct xfs_da_args {
struct xfs_da_geometry *geo; /* da block geometry */
const __uint8_t *name; /* string (maybe not NULL terminated) */
int namelen; /* length of string (maybe no NULL) */
__uint8_t filetype; /* filetype of inode for directories */
......@@ -110,8 +128,6 @@ typedef struct xfs_da_state_path {
typedef struct xfs_da_state {
xfs_da_args_t *args; /* filename arguments */
struct xfs_mount *mp; /* filesystem mount point */
unsigned int blocksize; /* logical block size */
unsigned int node_ents; /* how many entries in danode */
xfs_da_state_path_t path; /* search/split paths */
xfs_da_state_path_t altpath; /* alternate path for join */
unsigned char inleaf; /* insert into 1->lf, 0->splf */
......@@ -185,9 +201,9 @@ int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
struct xfs_buf **bpp, int whichfork,
const struct xfs_buf_ops *ops);
xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mapped_bno,
int whichfork, const struct xfs_buf_ops *ops);
xfs_daddr_t xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
xfs_daddr_t mapped_bno, int whichfork,
const struct xfs_buf_ops *ops);
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf);
......
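To make the new xfs_da_geometry abstraction above concrete, here is a small userspace sketch that derives the directory geometry the way xfs_da_mount() now does, for an assumed 4k-block filesystem formatted with 16k directory blocks (sb_dirblklog = 2). For these parameters the printed values correspond to the old m_dirblksize/m_dirblkfsbs/m_dirleafblk/m_dirfreeblk derivation.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Assumed superblock geometry: 4096-byte blocks, 16k directory blocks. */
		unsigned int sb_blocklog = 12, sb_dirblklog = 2;

		unsigned int blklog   = sb_blocklog + sb_dirblklog;	/* log2 of dir block size */
		unsigned int blksize  = 1u << blklog;			/* 16384 */
		unsigned int fsbcount = 1u << sb_dirblklog;		/* 4 fs blocks per dir block */

		/* The directory file space is carved into 32GB segments: data at 0,
		 * leaf at 32GB, free at 64GB.  Converting those byte offsets to
		 * da blocks gives datablk/leafblk/freeblk. */
		uint64_t space_size = 1ULL << (32 + 3);			/* XFS_DIR2_SPACE_SIZE */
		uint64_t leafblk = ((1 * space_size) >> blklog) << sb_dirblklog;
		uint64_t freeblk = ((2 * space_size) >> blklog) << sb_dirblklog;

		printf("blksize=%u fsbcount=%u leafblk=0x%llx freeblk=0x%llx\n",
		       blksize, fsbcount,
		       (unsigned long long)leafblk, (unsigned long long)freeblk);
		return 0;
	}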
......@@ -26,8 +26,10 @@
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
/*
* Shortform directory ops
......@@ -425,9 +427,9 @@ xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
* Directory Leaf block operations
*/
static int
xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
xfs_dir2_max_leaf_ents(struct xfs_da_geometry *geo)
{
return (mp->m_dirblksize - sizeof(struct xfs_dir2_leaf_hdr)) /
return (geo->blksize - sizeof(struct xfs_dir2_leaf_hdr)) /
(uint)sizeof(struct xfs_dir2_leaf_entry);
}
......@@ -438,9 +440,9 @@ xfs_dir2_leaf_ents_p(struct xfs_dir2_leaf *lp)
}
static int
xfs_dir3_max_leaf_ents(struct xfs_mount *mp)
xfs_dir3_max_leaf_ents(struct xfs_da_geometry *geo)
{
return (mp->m_dirblksize - sizeof(struct xfs_dir3_leaf_hdr)) /
return (geo->blksize - sizeof(struct xfs_dir3_leaf_hdr)) /
(uint)sizeof(struct xfs_dir2_leaf_entry);
}
......@@ -591,9 +593,9 @@ xfs_da3_node_hdr_to_disk(
* Directory free space block operations
*/
static int
xfs_dir2_free_max_bests(struct xfs_mount *mp)
xfs_dir2_free_max_bests(struct xfs_da_geometry *geo)
{
return (mp->m_dirblksize - sizeof(struct xfs_dir2_free_hdr)) /
return (geo->blksize - sizeof(struct xfs_dir2_free_hdr)) /
sizeof(xfs_dir2_data_off_t);
}
......@@ -607,24 +609,25 @@ xfs_dir2_free_bests_p(struct xfs_dir2_free *free)
* Convert data space db to the corresponding free db.
*/
static xfs_dir2_db_t
xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
xfs_dir2_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
{
return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir2_free_max_bests(mp);
return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
(db / xfs_dir2_free_max_bests(geo));
}
/*
* Convert data space db to the corresponding index in a free db.
*/
static int
xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
xfs_dir2_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
{
return db % xfs_dir2_free_max_bests(mp);
return db % xfs_dir2_free_max_bests(geo);
}
static int
xfs_dir3_free_max_bests(struct xfs_mount *mp)
xfs_dir3_free_max_bests(struct xfs_da_geometry *geo)
{
return (mp->m_dirblksize - sizeof(struct xfs_dir3_free_hdr)) /
return (geo->blksize - sizeof(struct xfs_dir3_free_hdr)) /
sizeof(xfs_dir2_data_off_t);
}
......@@ -638,18 +641,19 @@ xfs_dir3_free_bests_p(struct xfs_dir2_free *free)
* Convert data space db to the corresponding free db.
*/
static xfs_dir2_db_t
xfs_dir3_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
xfs_dir3_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
{
return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir3_free_max_bests(mp);
return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
(db / xfs_dir3_free_max_bests(geo));
}
/*
* Convert data space db to the corresponding index in a free db.
*/
static int
xfs_dir3_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
xfs_dir3_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
{
return db % xfs_dir3_free_max_bests(mp);
return db % xfs_dir3_free_max_bests(geo);
}
static void
......
......@@ -19,10 +19,6 @@
#ifndef __XFS_DA_FORMAT_H__
#define __XFS_DA_FORMAT_H__
/*========================================================================
* Directory Structure when greater than XFS_LBSIZE(mp) bytes.
*========================================================================*/
/*
* This structure is common to both leaf nodes and non-leaf nodes in the Btree.
*
......@@ -122,8 +118,6 @@ struct xfs_da3_icnode_hdr {
__uint16_t level;
};
#define XFS_LBSIZE(mp) (mp)->m_sb.sb_blocksize
/*
* Directory version 2.
*
......@@ -330,8 +324,6 @@ xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr)
#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
#define XFS_DIR2_DATA_SPACE 0
#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
#define XFS_DIR2_DATA_FIRSTDB(mp) \
xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET)
/*
* Describe a free area in the data block.
......@@ -456,8 +448,6 @@ xfs_dir2_data_unused_tag_p(struct xfs_dir2_data_unused *dup)
*/
#define XFS_DIR2_LEAF_SPACE 1
#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
#define XFS_DIR2_LEAF_FIRSTDB(mp) \
xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET)
/*
* Leaf block header.
......@@ -513,17 +503,6 @@ struct xfs_dir3_leaf {
#define XFS_DIR3_LEAF_CRC_OFF offsetof(struct xfs_dir3_leaf_hdr, info.crc)
/*
* Get address of the bestcount field in the single-leaf block.
*/
static inline struct xfs_dir2_leaf_tail *
xfs_dir2_leaf_tail_p(struct xfs_mount *mp, struct xfs_dir2_leaf *lp)
{
return (struct xfs_dir2_leaf_tail *)
((char *)lp + mp->m_dirblksize -
sizeof(struct xfs_dir2_leaf_tail));
}
/*
* Get address of the bests array in the single-leaf block.
*/
......@@ -533,123 +512,6 @@ xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
}
/*
* DB blocks here are logical directory block numbers, not filesystem blocks.
*/
/*
* Convert dataptr to byte in file space
*/
static inline xfs_dir2_off_t
xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
{
return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
}
/*
* Convert byte in file space to dataptr. It had better be aligned.
*/
static inline xfs_dir2_dataptr_t
xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
{
return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
}
/*
* Convert byte in space to (DB) block
*/
static inline xfs_dir2_db_t
xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
{
return (xfs_dir2_db_t)
(by >> (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog));
}
/*
* Convert dataptr to a block number
*/
static inline xfs_dir2_db_t
xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
{
return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp));
}
/*
* Convert byte in space to offset in a block
*/
static inline xfs_dir2_data_aoff_t
xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
{
return (xfs_dir2_data_aoff_t)(by &
((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) - 1));
}
/*
* Convert dataptr to a byte offset in a block
*/
static inline xfs_dir2_data_aoff_t
xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
{
return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp));
}
/*
* Convert block and offset to byte in space
*/
static inline xfs_dir2_off_t
xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
xfs_dir2_data_aoff_t o)
{
return ((xfs_dir2_off_t)db <<
(mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) + o;
}
/*
* Convert block (DB) to block (dablk)
*/
static inline xfs_dablk_t
xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
{
return (xfs_dablk_t)(db << mp->m_sb.sb_dirblklog);
}
/*
* Convert byte in space to (DA) block
*/
static inline xfs_dablk_t
xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by)
{
return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by));
}
/*
* Convert block and offset to dataptr
*/
static inline xfs_dir2_dataptr_t
xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
xfs_dir2_data_aoff_t o)
{
return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o));
}
/*
* Convert block (dablk) to block (DB)
*/
static inline xfs_dir2_db_t
xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
{
return (xfs_dir2_db_t)(da >> mp->m_sb.sb_dirblklog);
}
/*
* Convert block (dablk) to byte offset in space
*/
static inline xfs_dir2_off_t
xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
{
return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0);
}
/*
* Free space block defintions for the node format.
*/
......@@ -659,8 +521,6 @@ xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
*/
#define XFS_DIR2_FREE_SPACE 2
#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
#define XFS_DIR2_FREE_FIRSTDB(mp) \
xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET)
typedef struct xfs_dir2_free_hdr {
__be32 magic; /* XFS_DIR2_FREE_MAGIC */
......@@ -735,16 +595,6 @@ typedef struct xfs_dir2_block_tail {
__be32 stale; /* count of stale lf entries */
} xfs_dir2_block_tail_t;
/*
* Pointer to the leaf header embedded in a data block (1-block format)
*/
static inline struct xfs_dir2_block_tail *
xfs_dir2_block_tail_p(struct xfs_mount *mp, struct xfs_dir2_data_hdr *hdr)
{
return ((struct xfs_dir2_block_tail *)
((char *)hdr + mp->m_dirblksize)) - 1;
}
/*
* Pointer to the leaf entries embedded in a data block (1-block format)
*/
......@@ -764,10 +614,6 @@ xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
* of an attribute name may not be unique, we may have duplicate keys. The
* internal links in the Btree are logical block offsets into the file.
*
*========================================================================
* Attribute structure when equal to XFS_LBSIZE(mp) bytes.
*========================================================================
*
* Struct leaf_entry's are packed from the top. Name/values grow from the
* bottom but are not packed. The freemap contains run-length-encoded entries
* for the free bytes after the leaf_entry's, but only the N largest such,
......
......@@ -85,38 +85,74 @@ static struct xfs_nameops xfs_ascii_ci_nameops = {
.compname = xfs_ascii_ci_compname,
};
void
xfs_dir_mount(
xfs_mount_t *mp)
int
xfs_da_mount(
struct xfs_mount *mp)
{
struct xfs_da_geometry *dageo;
int nodehdr_size;
ASSERT(xfs_sb_version_hasdirv2(&mp->m_sb));
ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT);
ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
XFS_MAX_BLOCKSIZE);
mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog);
mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog;
mp->m_dirdatablk = xfs_dir2_db_to_da(mp, XFS_DIR2_DATA_FIRSTDB(mp));
mp->m_dirleafblk = xfs_dir2_db_to_da(mp, XFS_DIR2_LEAF_FIRSTDB(mp));
mp->m_dirfreeblk = xfs_dir2_db_to_da(mp, XFS_DIR2_FREE_FIRSTDB(mp));
nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
mp->m_attr_node_ents = (mp->m_sb.sb_blocksize - nodehdr_size) /
mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
KM_SLEEP | KM_MAYFAIL);
mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
KM_SLEEP | KM_MAYFAIL);
if (!mp->m_dir_geo || !mp->m_attr_geo) {
kmem_free(mp->m_dir_geo);
kmem_free(mp->m_attr_geo);
return ENOMEM;
}
/* set up directory geometry */
dageo = mp->m_dir_geo;
dageo->blklog = mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog;
dageo->fsblog = mp->m_sb.sb_blocklog;
dageo->blksize = 1 << dageo->blklog;
dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog;
/*
* Now we've set up the block conversion variables, we can calculate the
* segment block constants using the geometry structure.
*/
dageo->datablk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_DATA_OFFSET);
dageo->leafblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_LEAF_OFFSET);
dageo->freeblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_FREE_OFFSET);
dageo->node_ents = (dageo->blksize - nodehdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
mp->m_dir_node_ents = (mp->m_dirblksize - nodehdr_size) /
dageo->magicpct = (dageo->blksize * 37) / 100;
/* set up attribute geometry - single fsb only */
dageo = mp->m_attr_geo;
dageo->blklog = mp->m_sb.sb_blocklog;
dageo->fsblog = mp->m_sb.sb_blocklog;
dageo->blksize = 1 << dageo->blklog;
dageo->fsbcount = 1;
dageo->node_ents = (dageo->blksize - nodehdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
dageo->magicpct = (dageo->blksize * 37) / 100;
mp->m_dir_magicpct = (mp->m_dirblksize * 37) / 100;
if (xfs_sb_version_hasasciici(&mp->m_sb))
mp->m_dirnameops = &xfs_ascii_ci_nameops;
else
mp->m_dirnameops = &xfs_default_nameops;
return 0;
}
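To make the new geometry concrete: with the same illustrative parameters as above (4 KiB filesystem blocks, sb_dirblklog = 2), the directory geometry comes out as blklog = 14, blksize = 1 << 14 = 16384, fsbcount = 4 and magicpct = 16384 * 37 / 100 = 6062, while the attribute geometry is pinned to a single filesystem block, giving blksize = 4096 and magicpct = 1515. These numbers are only an example, not values asserted by the patch; node_ents additionally depends on the per-format node header size returned by node_hdr_size.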
void
xfs_da_unmount(
struct xfs_mount *mp)
{
kmem_free(mp->m_dir_geo);
kmem_free(mp->m_attr_geo);
}
/*
......@@ -192,6 +228,7 @@ xfs_dir_init(
if (!args)
return ENOMEM;
args->geo = dp->i_mount->m_dir_geo;
args->dp = dp;
args->trans = tp;
error = xfs_dir2_sf_create(args, pdp->i_ino);
......@@ -226,6 +263,7 @@ xfs_dir_createname(
if (!args)
return ENOMEM;
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
......@@ -244,7 +282,7 @@ xfs_dir_createname(
goto out_free;
}
rval = xfs_dir2_isblock(tp, dp, &v);
rval = xfs_dir2_isblock(args, &v);
if (rval)
goto out_free;
if (v) {
......@@ -252,7 +290,7 @@ xfs_dir_createname(
goto out_free;
}
rval = xfs_dir2_isleaf(tp, dp, &v);
rval = xfs_dir2_isleaf(args, &v);
if (rval)
goto out_free;
if (v)
......@@ -320,6 +358,7 @@ xfs_dir_lookup(
* annotations into the reclaim path for the ilock.
*/
args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
......@@ -336,7 +375,7 @@ xfs_dir_lookup(
goto out_check_rval;
}
rval = xfs_dir2_isblock(tp, dp, &v);
rval = xfs_dir2_isblock(args, &v);
if (rval)
goto out_free;
if (v) {
......@@ -344,7 +383,7 @@ xfs_dir_lookup(
goto out_check_rval;
}
rval = xfs_dir2_isleaf(tp, dp, &v);
rval = xfs_dir2_isleaf(args, &v);
if (rval)
goto out_free;
if (v)
......@@ -391,6 +430,7 @@ xfs_dir_removename(
if (!args)
return ENOMEM;
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
......@@ -408,7 +448,7 @@ xfs_dir_removename(
goto out_free;
}
rval = xfs_dir2_isblock(tp, dp, &v);
rval = xfs_dir2_isblock(args, &v);
if (rval)
goto out_free;
if (v) {
......@@ -416,7 +456,7 @@ xfs_dir_removename(
goto out_free;
}
rval = xfs_dir2_isleaf(tp, dp, &v);
rval = xfs_dir2_isleaf(args, &v);
if (rval)
goto out_free;
if (v)
......@@ -455,6 +495,7 @@ xfs_dir_replace(
if (!args)
return ENOMEM;
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
......@@ -472,7 +513,7 @@ xfs_dir_replace(
goto out_free;
}
rval = xfs_dir2_isblock(tp, dp, &v);
rval = xfs_dir2_isblock(args, &v);
if (rval)
goto out_free;
if (v) {
......@@ -480,7 +521,7 @@ xfs_dir_replace(
goto out_free;
}
rval = xfs_dir2_isleaf(tp, dp, &v);
rval = xfs_dir2_isleaf(args, &v);
if (rval)
goto out_free;
if (v)
......@@ -516,6 +557,7 @@ xfs_dir_canenter(
if (!args)
return ENOMEM;
args->geo = dp->i_mount->m_dir_geo;
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
......@@ -531,7 +573,7 @@ xfs_dir_canenter(
goto out_free;
}
rval = xfs_dir2_isblock(tp, dp, &v);
rval = xfs_dir2_isblock(args, &v);
if (rval)
goto out_free;
if (v) {
......@@ -539,7 +581,7 @@ xfs_dir_canenter(
goto out_free;
}
rval = xfs_dir2_isleaf(tp, dp, &v);
rval = xfs_dir2_isleaf(args, &v);
if (rval)
goto out_free;
if (v)
......@@ -579,13 +621,13 @@ xfs_dir2_grow_inode(
* Set lowest possible block in the space requested.
*/
bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE);
count = mp->m_dirblkfsbs;
count = args->geo->fsbcount;
error = xfs_da_grow_inode_int(args, &bno, count);
if (error)
return error;
*dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno);
*dbp = xfs_dir2_da_to_db(args->geo, (xfs_dablk_t)bno);
/*
* Update file's size if this is the data space and it grew.
......@@ -607,19 +649,16 @@ xfs_dir2_grow_inode(
*/
int
xfs_dir2_isblock(
xfs_trans_t *tp,
xfs_inode_t *dp,
struct xfs_da_args *args,
int *vp) /* out: 1 is block, 0 is not block */
{
xfs_fileoff_t last; /* last file offset */
xfs_mount_t *mp;
int rval;
mp = dp->i_mount;
if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK)))
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(mp, last) == mp->m_dirblksize;
ASSERT(rval == 0 || dp->i_d.di_size == mp->m_dirblksize);
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
*vp = rval;
return 0;
}
......@@ -629,18 +668,15 @@ xfs_dir2_isblock(
*/
int
xfs_dir2_isleaf(
xfs_trans_t *tp,
xfs_inode_t *dp,
int *vp) /* out: 1 is leaf, 0 is not leaf */
struct xfs_da_args *args,
int *vp) /* out: 1 is leaf, 0 is not leaf */
{
xfs_fileoff_t last; /* last file offset */
xfs_mount_t *mp;
int rval;
mp = dp->i_mount;
if ((rval = xfs_bmap_last_offset(tp, dp, &last, XFS_DATA_FORK)))
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
*vp = last == mp->m_dirleafblk + (1 << mp->m_sb.sb_dirblklog);
*vp = last == args->geo->leafblk + args->geo->fsbcount;
return 0;
}
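With xfs_dir2_isblock() and xfs_dir2_isleaf() now taking the xfs_da_args (and hence the geometry) directly, the per-operation format probing shown in the hunks above collapses into a uniform pattern. A hypothetical helper, written only to illustrate the order of the probes -- it is not part of this patch, and a real caller would first check for the shortform case via the inode's data fork format:

	static int
	xfs_dir2_probe_format(struct xfs_da_args *args, int *fmt)
	{
		int	v;
		int	error;

		error = xfs_dir2_isblock(args, &v);
		if (error)
			return error;
		if (v) {
			*fmt = 0;		/* single directory block */
			return 0;
		}

		error = xfs_dir2_isleaf(args, &v);
		if (error)
			return error;
		*fmt = v ? 1 : 2;		/* leaf or node/btree format */
		return 0;
	}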
......@@ -668,11 +704,11 @@ xfs_dir2_shrink_inode(
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
da = xfs_dir2_db_to_da(mp, db);
da = xfs_dir2_db_to_da(args->geo, db);
/*
* Unmap the fsblock(s).
*/
if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
if ((error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount,
XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
&done))) {
/*
......@@ -699,12 +735,12 @@ xfs_dir2_shrink_inode(
/*
* If it's not a data block, we're done.
*/
if (db >= XFS_DIR2_LEAF_FIRSTDB(mp))
if (db >= xfs_dir2_byte_to_db(args->geo, XFS_DIR2_LEAF_OFFSET))
return 0;
/*
* If the block isn't the last one in the directory, we're done.
*/
if (dp->i_d.di_size > xfs_dir2_db_off_to_byte(mp, db + 1, 0))
if (dp->i_d.di_size > xfs_dir2_db_off_to_byte(args->geo, db + 1, 0))
return 0;
bno = da;
if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
......@@ -713,7 +749,7 @@ xfs_dir2_shrink_inode(
*/
return error;
}
if (db == mp->m_dirdatablk)
if (db == args->geo->datablk)
ASSERT(bno == 0);
else
ASSERT(bno > 0);
......
......@@ -80,7 +80,7 @@ struct xfs_dir_ops {
struct xfs_dir3_icleaf_hdr *from);
void (*leaf_hdr_from_disk)(struct xfs_dir3_icleaf_hdr *to,
struct xfs_dir2_leaf *from);
int (*leaf_max_ents)(struct xfs_mount *mp);
int (*leaf_max_ents)(struct xfs_da_geometry *geo);
struct xfs_dir2_leaf_entry *
(*leaf_ents_p)(struct xfs_dir2_leaf *lp);
......@@ -97,10 +97,12 @@ struct xfs_dir_ops {
struct xfs_dir3_icfree_hdr *from);
void (*free_hdr_from_disk)(struct xfs_dir3_icfree_hdr *to,
struct xfs_dir2_free *from);
int (*free_max_bests)(struct xfs_mount *mp);
int (*free_max_bests)(struct xfs_da_geometry *geo);
__be16 * (*free_bests_p)(struct xfs_dir2_free *free);
xfs_dir2_db_t (*db_to_fdb)(struct xfs_mount *mp, xfs_dir2_db_t db);
int (*db_to_fdindex)(struct xfs_mount *mp, xfs_dir2_db_t db);
xfs_dir2_db_t (*db_to_fdb)(struct xfs_da_geometry *geo,
xfs_dir2_db_t db);
int (*db_to_fdindex)(struct xfs_da_geometry *geo,
xfs_dir2_db_t db);
};
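All of the size-derived ops in xfs_dir_ops now key off the geometry rather than the whole xfs_mount. As a sketch of what one such op plausibly looks like under the new signature (an approximation of the v4 variant, not a verbatim copy from the patch; the real per-format implementations live alongside the ops tables), the number of "bests" entries a free-index block can carry is simply whatever fits after its header:

	static int
	xfs_dir2_free_max_bests(struct xfs_da_geometry *geo)
	{
		/* one 16-bit "best free" entry per data block tracked */
		return (geo->blksize - sizeof(struct xfs_dir2_free_hdr)) /
			sizeof(xfs_dir2_data_off_t);
	}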
extern const struct xfs_dir_ops *
......@@ -112,7 +114,9 @@ extern const struct xfs_dir_ops *
* Generic directory interface routines
*/
extern void xfs_dir_startup(void);
extern void xfs_dir_mount(struct xfs_mount *mp);
extern int xfs_da_mount(struct xfs_mount *mp);
extern void xfs_da_unmount(struct xfs_mount *mp);
extern int xfs_dir_isempty(struct xfs_inode *dp);
extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_inode *pdp);
......@@ -142,23 +146,23 @@ extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
/*
* Interface routines used by userspace utilities
*/
extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_isblock(struct xfs_da_args *args, int *r);
extern int xfs_dir2_isleaf(struct xfs_da_args *args, int *r);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_buf *bp);
extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
struct xfs_dir2_data_hdr *hdr, int *loghead);
extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_inode *dp,
extern void xfs_dir2_data_log_entry(struct xfs_da_args *args,
struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
extern void xfs_dir2_data_log_header(struct xfs_trans *tp, struct xfs_inode *dp,
extern void xfs_dir2_data_log_header(struct xfs_da_args *args,
struct xfs_buf *bp);
extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_unused *dup);
extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_inode *dp,
extern void xfs_dir2_data_log_unused(struct xfs_da_args *args,
struct xfs_buf *bp, struct xfs_dir2_data_unused *dup);
extern void xfs_dir2_data_make_free(struct xfs_da_args *args,
struct xfs_buf *bp, xfs_dir2_data_aoff_t offset,
xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_inode *dp,
extern void xfs_dir2_data_use_free(struct xfs_da_args *args,
struct xfs_buf *bp, struct xfs_dir2_data_unused *dup,
xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
int *needlogp, int *needscanp);
......
......@@ -353,10 +353,10 @@ xfs_qm_dqalloc(
dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen,
0);
error = xfs_buf_geterror(bp);
if (error)
if (!bp) {
error = ENOMEM;
goto error1;
}
bp->b_ops = &xfs_dquot_buf_ops;
/*
......@@ -832,47 +832,6 @@ xfs_qm_dqget(
return (0);
}
STATIC void
xfs_qm_dqput_final(
struct xfs_dquot *dqp)
{
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
struct xfs_dquot *gdqp;
struct xfs_dquot *pdqp;
trace_xfs_dqput_free(dqp);
if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(xs_qm_dquot_unused);
/*
* If we just added a udquot to the freelist, then we want to release
* the gdquot/pdquot reference that it (probably) has. Otherwise it'll
* keep the gdquot/pdquot from getting reclaimed.
*/
gdqp = dqp->q_gdquot;
if (gdqp) {
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
pdqp = dqp->q_pdquot;
if (pdqp) {
xfs_dqlock(pdqp);
dqp->q_pdquot = NULL;
}
xfs_dqunlock(dqp);
/*
* If we had a group/project quota hint, release it now.
*/
if (gdqp)
xfs_qm_dqput(gdqp);
if (pdqp)
xfs_qm_dqput(pdqp);
}
/*
* Release a reference to the dquot (decrement ref-count) and unlock it.
*
......@@ -888,10 +847,14 @@ xfs_qm_dqput(
trace_xfs_dqput(dqp);
if (--dqp->q_nrefs > 0)
if (--dqp->q_nrefs == 0) {
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
trace_xfs_dqput_free(dqp);
if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(xs_qm_dquot_unused);
}
xfs_dqunlock(dqp);
else
xfs_qm_dqput_final(dqp);
}
/*
......
......@@ -52,8 +52,6 @@ typedef struct xfs_dquot {
int q_bufoffset; /* off of dq in buffer (# dquots) */
xfs_fileoff_t q_fileoffset; /* offset in quotas file */
struct xfs_dquot*q_gdquot; /* group dquot, hint only */
struct xfs_dquot*q_pdquot; /* project dquot, hint only */
xfs_disk_dquot_t q_core; /* actual usage & quotas */
xfs_dq_logitem_t q_logitem; /* dquot log item */
xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
......
......@@ -944,7 +944,7 @@ xfs_dir_open(
*/
mode = xfs_ilock_data_map_shared(ip);
if (ip->i_d.di_nextents > 0)
xfs_dir3_data_readahead(NULL, ip, 0, -1);
xfs_dir3_data_readahead(ip, 0, -1);
xfs_iunlock(ip, mode);
return 0;
}
......