Commit e8d256ac authored by Linus Torvalds

Merge tag 'for-linus-v3.10-rc4' of git://oss.sgi.com/xfs/xfs

Pull xfs fixes from Ben Myers:
 - Fix nested transactions in xfs_qm_scall_setqlim
 - Clear suid/sgid bits when we truncate with size update
 - Fix recovery for split buffers
 - Fix block count on remote symlinks
 - Add fsgeom flag for v5 superblock support
 - Disable XFS_IOC_SWAPEXT for CRC enabled filesystems
 - Fix dirv3 freespace block corruption

* tag 'for-linus-v3.10-rc4' of git://oss.sgi.com/xfs/xfs:
  xfs: fix dir3 freespace block corruption
  xfs: disable swap extents ioctl on CRC enabled filesystems
  xfs: add fsgeom flag for v5 superblock support.
  xfs: fix incorrect remote symlink block count
  xfs: fix split buffer vector log recovery support
  xfs: kill suid/sgid through the truncate path.
  xfs: avoid nesting transactions in xfs_qm_scall_setqlim()
parents 977b55cf e400d27d
@@ -262,12 +262,7 @@ xfs_buf_item_format_segment(
 			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
 			vecp->i_len = nbits * XFS_BLF_CHUNK;
 			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-/*
- * You would think we need to bump the nvecs here too, but we do not
- * this number is used by recovery, and it gets confused by the boundary
- * split here
- *			nvecs++;
- */
+			nvecs++;
 			vecp++;
 			first_bit = next_bit;
 			last_bit = next_bit;
......
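The formatting change above pairs with the recovery change further down: the formatting loop now counts every iovec it emits, because a contiguous dirty range that crosses a discontiguity in the buffer's backing pages is written as two separate vectors, and recovery replays one region per counted vector. A standalone model of the counting (hypothetical helper with chunk and page sizes hard-coded; not the kernel code):

#include <stdio.h>

#define CHUNK	128	/* XFS_BLF_CHUNK: bytes per dirty-bitmap bit */
#define PAGE_SZ	4096	/* illustrative page size */

/*
 * A dirty run of 'nbits' chunks starting 'offset' bytes into the buffer
 * is emitted as one iovec per physically contiguous piece; a run that
 * straddles a page boundary therefore needs two iovecs, and both must
 * be counted.
 */
static int count_iovecs(unsigned int offset, unsigned int nbits)
{
	unsigned int len = nbits * CHUNK;
	int nvecs = 0;

	while (len > 0) {
		/* a piece ends at the next page boundary at the latest */
		unsigned int piece = PAGE_SZ - (offset % PAGE_SZ);

		if (piece > len)
			piece = len;
		nvecs++;		/* one iovec per contiguous piece */
		offset += piece;
		len -= piece;
	}
	return nvecs;
}

int main(void)
{
	/* 40 chunks (5120 bytes) starting 1KB into a page: crosses a
	 * boundary, so two iovecs are emitted */
	printf("iovecs: %d\n", count_iovecs(1024, 40));	/* prints 2 */
	return 0;
}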
@@ -219,6 +219,14 @@ xfs_swap_extents(
 	int		taforkblks = 0;
 	__uint64_t	tmp;
 
+	/*
+	 * We have no way of updating owner information in the BMBT blocks for
+	 * each inode on CRC enabled filesystems, so to avoid corrupting this
+	 * metadata we simply don't allow extent swaps to occur.
+	 */
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		return XFS_ERROR(EINVAL);
+
 	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
 	if (!tempifp) {
 		error = XFS_ERROR(ENOMEM);
......
@@ -715,6 +715,7 @@ struct xfs_dir3_free_hdr {
 	__be32		firstdb;	/* db of first entry */
 	__be32		nvalid;		/* count of valid entries */
 	__be32		nused;		/* count of used entries */
+	__be32		pad;		/* 64 bit alignment. */
 };
 
 struct xfs_dir3_free {
......
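The new pad field makes the compiler's tail padding explicit: firstdb/nvalid/nused leave the header 4 bytes short of an 8-byte multiple, and unnamed padding would reach the disk with unspecified contents. A quick layout check with a mirror struct (plain integer types standing in for __be32/__be64; the nested block header is assumed to be the usual 48-byte xfs_dir3_blk_hdr):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* mirror of the assumed 48-byte xfs_dir3_blk_hdr layout */
struct blk_hdr_mirror {
	uint32_t	magic;
	uint32_t	crc;
	uint64_t	blkno;
	uint64_t	lsn;
	uint8_t		uuid[16];
	uint64_t	owner;
};

struct free_hdr_mirror {
	struct blk_hdr_mirror	hdr;		/* 48 bytes */
	uint32_t		firstdb;	/* db of first entry */
	uint32_t		nvalid;		/* count of valid entries */
	uint32_t		nused;		/* count of used entries */
	uint32_t		pad;		/* 64 bit alignment */
};

int main(void)
{
	/* without 'pad' the compiler would still round the struct up to
	 * 64 bytes, but as unnamed padding with unspecified contents;
	 * naming the field pins the on-disk format and lets it be zeroed */
	assert(offsetof(struct free_hdr_mirror, pad) == 60);
	assert(sizeof(struct free_hdr_mirror) == 64);
	printf("header is %zu bytes\n", sizeof(struct free_hdr_mirror));
	return 0;
}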
@@ -263,18 +263,19 @@ xfs_dir3_free_get_buf(
 	 * Initialize the new block to be empty, and remember
 	 * its first slot as our empty slot.
 	 */
-	hdr.magic = XFS_DIR2_FREE_MAGIC;
-	hdr.firstdb = 0;
-	hdr.nused = 0;
-	hdr.nvalid = 0;
+	memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr));
+	memset(&hdr, 0, sizeof(hdr));
+
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
 
 		hdr.magic = XFS_DIR3_FREE_MAGIC;
+
 		hdr3->hdr.blkno = cpu_to_be64(bp->b_bn);
 		hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
 		uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
-	}
+	} else
+		hdr.magic = XFS_DIR2_FREE_MAGIC;
 
 	xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
 	*bpp = bp;
 	return 0;
@@ -1921,8 +1922,6 @@ xfs_dir2_node_addname_int(
 		 */
 		freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
 					xfs_dir3_free_max_bests(mp);
-		free->hdr.nvalid = 0;
-		free->hdr.nused = 0;
 	} else {
 		free = fbp->b_addr;
 		bests = xfs_dir3_free_bests_p(mp, free);
......
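Note that the fix here is the memset pair as much as the restructured magic assignment: both the on-disk header region and the in-core hdr are zeroed before any field is set, so the new pad field never carries stale bytes into the block CRC. A generic sketch of the failure mode being closed (illustrative struct and magic value, not the kernel types):

#include <stdio.h>
#include <string.h>

/* stand-in for the free-block header; field names follow the diff,
 * types and the magic value are placeholders */
struct hdr {
	unsigned int	magic;
	unsigned int	firstdb;
	unsigned int	nvalid;
	unsigned int	nused;
	unsigned int	pad;	/* must reach the disk as zero */
};

static void fill(struct hdr *h)
{
	/* field-by-field init, as the old code did: 'pad' is never touched */
	h->magic = 0x58444633;	/* placeholder magic */
	h->firstdb = 0;
	h->nvalid = 0;
	h->nused = 0;
}

int main(void)
{
	struct hdr on_disk;

	/* the fix: zero the whole region first, then set fields, so the
	 * pad bytes are deterministic and the computed CRC is stable */
	memset(&on_disk, 0, sizeof(on_disk));
	fill(&on_disk);
	printf("pad = %u\n", on_disk.pad);	/* guaranteed 0 */
	return 0;
}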
@@ -236,6 +236,7 @@ typedef struct xfs_fsop_resblks {
 #define XFS_FSOP_GEOM_FLAGS_PROJID32	0x0800	/* 32-bit project IDs */
 #define XFS_FSOP_GEOM_FLAGS_DIRV2CI	0x1000	/* ASCII only CI names */
 #define XFS_FSOP_GEOM_FLAGS_LAZYSB	0x4000	/* lazy superblock counters */
+#define XFS_FSOP_GEOM_FLAGS_V5SB	0x8000	/* version 5 superblock */
 
 /*
......
@@ -99,7 +99,9 @@ xfs_fs_geometry(
 			(xfs_sb_version_hasattr2(&mp->m_sb) ?
 				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
 			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
-				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0);
+				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
+			(xfs_sb_version_hascrc(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_V5SB : 0);
 		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
 			mp->m_sb.sb_logsectsize : BBSIZE;
 		geo->rtsectsize = mp->m_sb.sb_blocksize;
......
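With the flag exported, userspace can detect a v5 superblock through the existing geometry ioctl instead of reading the superblock itself; a defragmenter, for example, can skip the now-rejected XFS_IOC_SWAPEXT up front. A hedged userspace sketch, assuming the xfsprogs development headers are installed (the fallback define mirrors the value added above):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* xfs_fsop_geom_t, XFS_IOC_FSGEOMETRY */

#ifndef XFS_FSOP_GEOM_FLAGS_V5SB
#define XFS_FSOP_GEOM_FLAGS_V5SB 0x8000	/* older headers: value from the diff */
#endif

int main(int argc, char **argv)
{
	xfs_fsop_geom_t	geo;
	int		fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}
	/* the new bit: set when the filesystem has a version 5 superblock */
	printf("v5 superblock: %s\n",
	       (geo.flags & XFS_FSOP_GEOM_FLAGS_V5SB) ? "yes" : "no");
	close(fd);
	return 0;
}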
@@ -455,6 +455,28 @@ xfs_vn_getattr(
 	return 0;
 }
 
+static void
+xfs_setattr_mode(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*ip,
+	struct iattr		*iattr)
+{
+	struct inode		*inode = VFS_I(ip);
+	umode_t			mode = iattr->ia_mode;
+
+	ASSERT(tp);
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+	if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+		mode &= ~S_ISGID;
+
+	ip->i_d.di_mode &= S_IFMT;
+	ip->i_d.di_mode |= mode & ~S_IFMT;
+
+	inode->i_mode &= S_IFMT;
+	inode->i_mode |= mode & ~S_IFMT;
+}
+
 int
 xfs_setattr_nonsize(
 	struct xfs_inode	*ip,
@@ -606,18 +628,8 @@ xfs_setattr_nonsize(
 	/*
 	 * Change file access modes.
 	 */
-	if (mask & ATTR_MODE) {
-		umode_t mode = iattr->ia_mode;
-
-		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
-			mode &= ~S_ISGID;
-
-		ip->i_d.di_mode &= S_IFMT;
-		ip->i_d.di_mode |= mode & ~S_IFMT;
-
-		inode->i_mode &= S_IFMT;
-		inode->i_mode |= mode & ~S_IFMT;
-	}
+	if (mask & ATTR_MODE)
+		xfs_setattr_mode(tp, ip, iattr);
 
 	/*
 	 * Change file access or modified times.
@@ -714,9 +726,8 @@ xfs_setattr_size(
 		return XFS_ERROR(error);
 
 	ASSERT(S_ISREG(ip->i_d.di_mode));
-	ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
-			ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
-			ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+	ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+			ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
 	if (!(flags & XFS_ATTR_NOLOCK)) {
 		lock_flags |= XFS_IOLOCK_EXCL;
@@ -860,6 +871,12 @@ xfs_setattr_size(
 			xfs_inode_clear_eofblocks_tag(ip);
 	}
 
+	/*
+	 * Change file access modes.
+	 */
+	if (mask & ATTR_MODE)
+		xfs_setattr_mode(tp, ip, iattr);
+
 	if (mask & ATTR_CTIME) {
 		inode->i_ctime = iattr->ia_ctime;
 		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
......
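The new helper keeps the suid/sgid-clearing and mode-splicing logic in one place so the truncate path can call it too. The two-step mask idiom preserves the file type while replacing the permission bits; a quick standalone check of that arithmetic (mode values illustrative):

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	/* current: regular file, mode 0755 */
	mode_t cur = S_IFREG | 0755;
	/* requested: 2644 (setgid + rw-r--r--); the caller is not in the
	 * owning group and lacks CAP_FSETID, so setgid is stripped just
	 * as xfs_setattr_mode() does */
	mode_t mode = S_ISGID | 0644;

	mode &= ~S_ISGID;

	cur &= S_IFMT;		/* keep only the file type bits (S_IFREG) */
	cur |= mode & ~S_IFMT;	/* splice in the new permission bits */

	assert(S_ISREG(cur));			/* type preserved */
	assert((cur & ~S_IFMT) == 0644);	/* setgid gone, 0644 applied */
	return 0;
}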
@@ -2096,6 +2096,17 @@ xlog_recover_do_reg_buffer(
 		ASSERT(BBTOB(bp->b_io_length) >=
 		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
 
+		/*
+		 * The dirty regions logged in the buffer, even though
+		 * contiguous, may span multiple chunks. This is because the
+		 * dirty region may span a physical page boundary in a buffer
+		 * and hence be split into two separate vectors for writing into
+		 * the log. Hence we need to trim nbits back to the length of
+		 * the current region being copied out of the log.
+		 */
+		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
+			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
+
 		/*
 		 * Do a sanity check if this is a dquot buffer. Just checking
 		 * the first dquot in the buffer should do. XXXThis is
......
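The clamp is pure shift arithmetic: logged region lengths are multiples of XFS_BLF_CHUNK, and XFS_BLF_SHIFT converts between bytes and chunks. A minimal sketch with the 128-byte chunk size hard-coded (an assumption mirrored from xfs_buf_item.h):

#include <assert.h>

#define XFS_BLF_SHIFT	7	/* log2 of the assumed 128-byte chunk */

/* Clamp the chunk count to what the log vector really contains, as
 * xlog_recover_do_reg_buffer() now does for split vectors. */
static unsigned int trim_nbits(unsigned int nbits, unsigned int i_len)
{
	if (i_len < (nbits << XFS_BLF_SHIFT))
		nbits = i_len >> XFS_BLF_SHIFT;
	return nbits;
}

int main(void)
{
	/* bitmap says 40 chunks are dirty, but this vector carries only
	 * 1024 bytes (8 chunks) because the region was split */
	assert(trim_nbits(40, 1024) == 8);
	/* unsplit vector: length matches, nbits unchanged */
	assert(trim_nbits(8, 8 * 128) == 8);
	return 0;
}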
@@ -489,31 +489,36 @@ xfs_qm_scall_setqlim(
 	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
 		return 0;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
-	error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
-				  0, 0, XFS_DEFAULT_LOG_COUNT);
-	if (error) {
-		xfs_trans_cancel(tp, 0);
-		return (error);
-	}
-
 	/*
 	 * We don't want to race with a quotaoff so take the quotaoff lock.
-	 * (We don't hold an inode lock, so there's nothing else to stop
-	 * a quotaoff from happening). (XXXThis doesn't currently happen
-	 * because we take the vfslock before calling xfs_qm_sysent).
+	 * We don't hold an inode lock, so there's nothing else to stop
+	 * a quotaoff from happening.
 	 */
 	mutex_lock(&q->qi_quotaofflock);
 
 	/*
-	 * Get the dquot (locked), and join it to the transaction.
-	 * Allocate the dquot if this doesn't exist.
+	 * Get the dquot (locked) before we start, as we need to do a
+	 * transaction to allocate it if it doesn't exist. Once we have the
+	 * dquot, unlock it so we can start the next transaction safely. We hold
+	 * a reference to the dquot, so it's safe to do this unlock/lock without
+	 * it being reclaimed in the mean time.
 	 */
-	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
-		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
+	if (error) {
 		ASSERT(error != ENOENT);
 		goto out_unlock;
 	}
+	xfs_dqunlock(dqp);
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
+	error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
+				  0, 0, XFS_DEFAULT_LOG_COUNT);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		goto out_rele;
+	}
+
+	xfs_dqlock(dqp);
 	xfs_trans_dqjoin(tp, dqp);
 	ddq = &dqp->q_core;
@@ -621,9 +626,10 @@ xfs_qm_scall_setqlim(
 	xfs_trans_log_dquot(tp, dqp);
 
 	error = xfs_trans_commit(tp, 0);
-	xfs_qm_dqrele(dqp);
 
-out_unlock:
+out_rele:
+	xfs_qm_dqrele(dqp);
+out_unlock:
 	mutex_unlock(&q->qi_quotaofflock);
 	return error;
 }
......
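The fix is purely an ordering change: xfs_qm_dqget() with XFS_QMOPT_DQALLOC may run its own allocation transaction, so starting the setqlim transaction first nested one transaction inside another. A toy model of the invariant (plain C, not kernel code; the assert models "one open transaction at a time"):

#include <assert.h>
#include <stdio.h>

static int tx_depth;	/* models the no-nesting rule */

static void trans_start(void)
{
	assert(tx_depth == 0);	/* nesting would fire here */
	tx_depth++;
}

static void trans_commit(void)
{
	tx_depth--;
}

/* dqget() may need its own transaction to allocate the dquot on disk */
static void dqget_alloc(void)
{
	trans_start();
	/* ... allocate and initialise the dquot ... */
	trans_commit();
}

int main(void)
{
	/* fixed ordering: resolve (and possibly allocate) the dquot first,
	 * with no transaction held ... */
	dqget_alloc();

	/* ... then run the setqlim transaction on its own */
	trans_start();
	/* ... modify and log the quota limits ... */
	trans_commit();

	puts("no nested transactions");
	return 0;
}

Swapping the two steps in main() trips the assert, which is exactly the nesting the patch removes.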
@@ -56,16 +56,9 @@ xfs_symlink_blocks(
 	struct xfs_mount *mp,
 	int		pathlen)
 {
-	int		fsblocks = 0;
-	int		len = pathlen;
-
-	do {
-		fsblocks++;
-		len -= XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
-	} while (len > 0);
-
-	ASSERT(fsblocks <= XFS_SYMLINK_MAPS);
-	return fsblocks;
+	int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
+
+	return (pathlen + buflen - 1) / buflen;
 }
 
 static int
@@ -405,7 +398,7 @@ xfs_symlink(
 	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
 		fs_blocks = 0;
 	else
-		fs_blocks = XFS_B_TO_FSB(mp, pathlen);
+		fs_blocks = xfs_symlink_blocks(mp, pathlen);
 	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
 	error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
 			XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
@@ -512,7 +505,7 @@ xfs_symlink(
 	cur_chunk = target_path;
 	offset = 0;
 	for (n = 0; n < nmaps; n++) {
-		char *buf;
+		char	*buf;
 
 		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
 		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
@@ -525,9 +518,7 @@ xfs_symlink(
 		bp->b_ops = &xfs_symlink_buf_ops;
 
 		byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
-		if (pathlen < byte_cnt) {
-			byte_cnt = pathlen;
-		}
+		byte_cnt = min(byte_cnt, pathlen);
 
 		buf = bp->b_addr;
 		buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
@@ -542,6 +533,7 @@ xfs_symlink(
 		xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
 						(char *)bp->b_addr);
 	}
+	ASSERT(pathlen == 0);
 }
 
 /*
......
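The rewritten xfs_symlink_blocks() is the standard integer ceiling division, ceil(pathlen / buflen) written as (pathlen + buflen - 1) / buflen, and the reservation path now uses it instead of XFS_B_TO_FSB(), which divides by the raw block size and so undercounts on v5 filesystems where each remote symlink block loses space to its header. A quick equivalence check against the old loop (buflen value illustrative):

#include <assert.h>

/* old: count blocks by repeated subtraction */
static int blocks_loop(int pathlen, int buflen)
{
	int fsblocks = 0;
	int len = pathlen;

	do {
		fsblocks++;
		len -= buflen;
	} while (len > 0);
	return fsblocks;
}

/* new: closed-form ceiling division */
static int blocks_div(int pathlen, int buflen)
{
	return (pathlen + buflen - 1) / buflen;
}

int main(void)
{
	int buflen = 1016;	/* illustrative XFS_SYMLINK_BUF_SPACE value */

	/* the two agree for every pathlen >= 1 */
	for (int pathlen = 1; pathlen <= 4 * buflen; pathlen++)
		assert(blocks_loop(pathlen, buflen) ==
		       blocks_div(pathlen, buflen));
	return 0;
}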