Commit fb1cb7b2 authored by Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: remove incorrect assert in xfs_vm_writepage
  xfs: use hlist_add_fake
  xfs: fix a few compiler warnings with CONFIG_XFS_QUOTA=n
  xfs: tell lockdep about parent iolock usage in filestreams
  xfs: move delayed write buffer trace
  xfs: fix per-ag reference counting in inode reclaim tree walking
  xfs: xfs_ioctl: fix information leak to userland
  xfs: remove experimental tag from the delaylog option
parents fe7e96f6 ece413f5
@@ -794,17 +794,6 @@ designed.
 Roadmap:
-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
......
@@ -1111,11 +1111,12 @@ xfs_vm_writepage(
 			uptodate = 0;
 		/*
-		 * A hole may still be marked uptodate because discard_buffer
-		 * leaves the flag set.
+		 * set_page_dirty dirties all buffers in a page, independent
+		 * of their state.  The dirty state however is entirely
+		 * meaningless for holes (!mapped && uptodate), so skip
+		 * buffers covering holes here.
 		 */
 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
-			ASSERT(!buffer_dirty(bh));
 			imap_valid = 0;
 			continue;
 		}
......
@@ -1781,7 +1781,6 @@ xfs_buf_delwri_split(
 	INIT_LIST_HEAD(list);
 	spin_lock(dwlk);
 	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		ASSERT(bp->b_flags & XBF_DELWRI);
 		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@ xfs_buf_delwri_split(
 					 _XBF_RUN_QUEUES);
 			bp->b_flags |= XBF_WRITE;
 			list_move_tail(&bp->b_list, list);
+			trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		} else
 			skipped++;
 	}
......
@@ -416,7 +416,7 @@ xfs_attrlist_by_handle(
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
-	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
+	kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
 	if (!kbuf)
 		goto out_dput;
......
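The kmalloc-to-kzalloc change above is the "information leak to userland" fix from the merge summary: kmalloc() hands back uninitialized heap memory, so if a reply buffer is only partially filled but copied out in full, stale kernel data goes to userspace with it. A generic sketch of the pattern (not the XFS ioctl code itself, just an illustration of why zeroing the allocation closes the leak):

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	/*
	 * Sketch only: allocate a reply buffer and copy the whole thing back
	 * to userspace.  With plain kmalloc() the bytes after the short reply
	 * would still hold old heap contents; kzalloc() zeroes them first.
	 */
	static long send_reply(void __user *uarg, size_t buflen)
	{
		char *kbuf = kzalloc(buflen, GFP_KERNEL);
		long ret = 0;

		if (!kbuf)
			return -ENOMEM;

		snprintf(kbuf, buflen, "short reply");	/* fills only part of kbuf */

		if (copy_to_user(uarg, kbuf, buflen))	/* copies the full buffer */
			ret = -EFAULT;

		kfree(kbuf);
		return ret;
	}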
@@ -762,7 +762,8 @@ xfs_setup_inode(
 	inode->i_state = I_NEW;
 	inode_sb_list_add(inode);
-	insert_inode_hash(inode);
+	/* make the inode look hashed for the writeback code */
+	hlist_add_fake(&inode->i_hash);
 	inode->i_mode	= ip->i_d.di_mode;
 	inode->i_nlink	= ip->i_d.di_nlink;
......
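The new comment in this hunk spells out the intent: XFS only needs the inode to look hashed for the writeback code, not to actually sit in a VFS inode hash bucket, so it can skip insert_inode_hash() and the hash lock entirely. A paraphrased sketch of the <linux/list.h> helpers involved, showing why the fake node passes the "is it hashed?" test:

	/*
	 * Paraphrased from <linux/list.h>: hlist_add_fake() points pprev at
	 * the node's own next field, so hlist_unhashed() returns false even
	 * though the node is not linked into any hash chain.
	 */
	static inline int hlist_unhashed(const struct hlist_node *h)
	{
		return !h->pprev;
	}

	static inline void hlist_add_fake(struct hlist_node *n)
	{
		n->pprev = &n->next;
	}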
@@ -353,9 +353,6 @@ xfs_parseargs(
 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
 		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
 			mp->m_flags |= XFS_MOUNT_DELAYLOG;
-			cmn_err(CE_WARN,
-				"Enabling EXPERIMENTAL delayed logging feature "
-				"- use at your own risk.\n");
 		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
 			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
 		} else if (!strcmp(this_char, "ihashsize")) {
......
@@ -853,6 +853,7 @@ xfs_reclaim_inodes_ag(
 		if (trylock) {
 			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
 				skipped++;
+				xfs_perag_put(pag);
 				continue;
 			}
 			first_index = pag->pag_ici_reclaim_cursor;
......
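This one-line xfs_perag_put() pairs with the ASSERT added to xfs_free_perag() further down in the diff: the reclaim walk takes a reference on each per-AG structure, and the trylock-failure path continued to the next AG without dropping it, leaving the reference count permanently elevated. A simplified sketch of the get/put discipline the fix restores (not the actual walker, just the shape of it):

	/* Simplified per-AG walk: every exit path must drop the reference. */
	static void walk_ags(struct xfs_mount *mp)
	{
		xfs_agnumber_t agno;

		for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
			struct xfs_perag *pag = xfs_perag_get(mp, agno);

			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				xfs_perag_put(pag);	/* the put added by this fix */
				continue;
			}
			/* ... reclaim work ... */
			mutex_unlock(&pag->pag_ici_reclaim_lock);
			xfs_perag_put(pag);
		}
	}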
@@ -744,9 +744,15 @@ xfs_filestream_new_ag(
 	 * If the file's parent directory is known, take its iolock in exclusive
 	 * mode to prevent two sibling files from racing each other to migrate
 	 * themselves and their parent to different AGs.
+	 *
+	 * Note that we lock the parent directory iolock inside the child
+	 * iolock here.  That's fine as we never hold both parent and child
+	 * iolock in any other place.  This is different from the ilock,
+	 * which requires locking of the child after the parent for namespace
+	 * operations.
 	 */
 	if (pip)
-		xfs_ilock(pip, XFS_IOLOCK_EXCL);
+		xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	/*
 	 * A new AG needs to be found for the file.  If the file's parent
......
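As the summary line "tell lockdep about parent iolock usage in filestreams" suggests, XFS_IOLOCK_PARENT here is effectively a lockdep annotation rather than a behavioural change: it puts the parent directory's iolock in a different lockdep subclass, so taking it while already holding the child's iolock is not reported as recursive locking. The generic kernel mechanism is the *_nested() primitives; a minimal illustration with rw_semaphores (names invented for the example):

	#include <linux/rwsem.h>

	struct node {
		struct rw_semaphore iolock;
	};

	/* Subclasses mirroring the child/parent annotation. */
	enum { LOCK_CHILD = 0, LOCK_PARENT = 1 };

	static void lock_child_then_parent(struct node *child, struct node *parent)
	{
		down_write_nested(&child->iolock, LOCK_CHILD);
		/*
		 * Without a distinct subclass, lockdep would see the same lock
		 * class taken twice and warn; the PARENT subclass records that
		 * this ordering is intentional and deadlock-free.
		 */
		down_write_nested(&parent->iolock, LOCK_PARENT);
	}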
@@ -275,6 +275,7 @@ xfs_free_perag(
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
 		spin_unlock(&mp->m_perag_lock);
 		ASSERT(pag);
+		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
......
@@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
 #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
 #define xfs_trans_apply_dquot_deltas(tp)
 #define xfs_trans_unreserve_and_mod_dquots(tp)
-#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags)	(0)
-#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl)	(0)
+static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
+		struct xfs_inode *ip, long nblks, long ninos, uint flags)
+{
+	return 0;
+}
+static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
+		struct xfs_mount *mp, struct xfs_dquot *udqp,
+		struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
+{
+	return 0;
+}
 #define xfs_qm_vop_create_dqattach(tp, ip, u, g)
 #define xfs_qm_vop_rename_dqattach(it)				(0)
 #define xfs_qm_vop_chown(tp, ip, old, new)			(NULL)
@@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-#define xfs_qm_sync(mp, fl)					(0)
+static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
+{
+	return 0;
+}
 #define xfs_qm_newmount(mp, a, b)				(0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
-#define xfs_qm_unmount_quotas(mp)				(0)
+#define xfs_qm_unmount_quotas(mp)
 #endif /* CONFIG_XFS_QUOTA */
 #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
......
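The macro-to-static-inline conversions above are what address the "compiler warnings with CONFIG_XFS_QUOTA=n" entry in the merge summary: a #define stub that expands to (0) discards its arguments, so variables that exist only to be passed to it can end up flagged as unused, and conditions built on it collapse to bare constants. A static inline returning 0 still compiles away to nothing, but keeps the arguments evaluated and type-checked. A generic before/after sketch (names are illustrative only, not the XFS ones):

	/*
	 * Before: the arguments vanish at preprocessing time, so a caller's
	 * 'nblks' local can become an unused variable when quota support is
	 * compiled out.
	 */
	#define reserve_quota_stub(tp, ip, nblks, ninos, flags)	(0)

	/*
	 * After: the same zero-cost stub, but the arguments remain "used"
	 * and type-checked, which silences the warnings.
	 */
	static inline int reserve_quota(struct xfs_trans *tp, struct xfs_inode *ip,
					long nblks, long ninos, unsigned int flags)
	{
		return 0;
	}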