Commit 962bf3ea authored by Linus Torvalds

Merge tag 'xfs-for-linus-3.15-rc2' of git://oss.sgi.com/xfs/xfs

Pull xfs bug fixes from Dave Chinner:
 "The fixes are for data corruption issues, memory corruption and
  regressions for changes merged in -rc1.

  Data corruption fixes:
   - fix a bunch of delayed allocation state mismatches
   - fix collapse/zero range bugs
   - fix a direct IO block mapping bug @ EOF

  Other fixes:
   - fix a use after free on metadata IO error
   - fix a use after free on IO error during unmount
   - fix an incorrect error sign on direct IO write errors
   - add missing O_TMPFILE inode security context initialisation"

* tag 'xfs-for-linus-3.15-rc2' of git://oss.sgi.com/xfs/xfs:
  xfs: fix tmpfile/selinux deadlock and initialize security
  xfs: fix buffer use after free on IO error
  xfs: wrong error sign conversion during failed DIO writes
  xfs: unmount does not wait for shutdown during unmount
  xfs: collapse range is delalloc challenged
  xfs: don't map ranges that span EOF for direct IO
  xfs: zeroing space needs to punch delalloc blocks
  xfs: xfs_vm_write_end truncates too much on failure
  xfs: write failure beyond EOF truncates too much data
  xfs: kill buffers over failed write ranges properly
parents 7d77879b 330033d6
@@ -1344,6 +1344,14 @@ __xfs_get_blocks(
 	/*
 	 * If this is O_DIRECT or the mpage code calling tell them how large
 	 * the mapping is, so that we can avoid repeated get_blocks calls.
+	 *
+	 * If the mapping spans EOF, then we have to break the mapping up as the
+	 * mapping for blocks beyond EOF must be marked new so that sub block
+	 * regions can be correctly zeroed. We can't do this for mappings within
+	 * EOF unless the mapping was just allocated or is unwritten, otherwise
+	 * the callers would overwrite existing data with zeros. Hence we have
+	 * to split the mapping into a range up to and including EOF, and a
+	 * second mapping for beyond EOF.
 	 */
 	if (direct || size > (1 << inode->i_blkbits)) {
 		xfs_off_t	mapping_size;
@@ -1354,6 +1362,12 @@ __xfs_get_blocks(
 		ASSERT(mapping_size > 0);
 		if (mapping_size > size)
 			mapping_size = size;
+		if (offset < i_size_read(inode) &&
+		    offset + mapping_size >= i_size_read(inode)) {
+			/* limit mapping to block that spans EOF */
+			mapping_size = roundup_64(i_size_read(inode) - offset,
+						  1 << inode->i_blkbits);
+		}
 		if (mapping_size > LONG_MAX)
 			mapping_size = LONG_MAX;
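
The clamp above is pure arithmetic: a mapping that starts inside EOF and runs past it is rounded up to the block that contains EOF, and everything beyond gets a separate mapping that can safely be marked new. Here is a minimal userspace sketch of that calculation, with hypothetical sizes and a local stand-in for the kernel's roundup_64():

    /* Minimal sketch of the EOF clamp; roundup64() is a local stand-in
     * for the kernel's roundup_64(). All values are hypothetical. */
    #include <stdio.h>

    static long long roundup64(long long x, long long y)
    {
        return ((x + y - 1) / y) * y;
    }

    int main(void)
    {
        long long blksize = 4096;              /* 1 << inode->i_blkbits */
        long long isize = 10000;               /* EOF falls inside block 2 */
        long long offset = 4096;               /* mapping starts at block 1 */
        long long mapping_size = 5 * blksize;  /* extent runs past EOF */

        /* same condition as __xfs_get_blocks: the mapping starts inside
         * EOF and crosses it, so trim it to the block that spans EOF */
        if (offset < isize && offset + mapping_size >= isize)
            mapping_size = roundup64(isize - offset, blksize);

        /* prints 8192: blocks 1 and 2 only; the range beyond EOF gets a
         * second mapping that can be marked new and zeroed safely */
        printf("mapping_size = %lld\n", mapping_size);
        return 0;
    }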
@@ -1566,6 +1580,16 @@ xfs_vm_write_failed(
 			xfs_vm_kill_delalloc_range(inode, block_offset,
 						   block_offset + bh->b_size);
+
+			/*
+			 * This buffer does not contain data any more. Make
+			 * sure anyone who finds it knows that for certain.
+			 */
+			clear_buffer_delay(bh);
+			clear_buffer_uptodate(bh);
+			clear_buffer_mapped(bh);
+			clear_buffer_new(bh);
+			clear_buffer_dirty(bh);
 		}
 	}
@@ -1599,12 +1623,21 @@ xfs_vm_write_begin(
 	status = __block_write_begin(page, pos, len, xfs_get_blocks);
 	if (unlikely(status)) {
 		struct inode	*inode = mapping->host;
+		size_t		isize = i_size_read(inode);
 
 		xfs_vm_write_failed(inode, page, pos, len);
 		unlock_page(page);
 
-		if (pos + len > i_size_read(inode))
-			truncate_pagecache(inode, i_size_read(inode));
+		/*
+		 * If the write is beyond EOF, we only want to kill blocks
+		 * allocated in this write, not blocks that were previously
+		 * written successfully.
+		 */
+		if (pos + len > isize) {
+			ssize_t start = max_t(ssize_t, pos, isize);
+
+			truncate_pagecache_range(inode, start, pos + len);
+		}
 
 		page_cache_release(page);
 		page = NULL;
@@ -1615,9 +1648,12 @@ xfs_vm_write_begin(
 }
 
 /*
- * On failure, we only need to kill delalloc blocks beyond EOF because they
- * will never be written. For blocks within EOF, generic_write_end() zeros them
- * so they are safe to leave alone and be written with all the other valid data.
+ * On failure, we only need to kill delalloc blocks beyond EOF in the range of
+ * this specific write because they will never be written. Previous writes
+ * beyond EOF where block allocation succeeded do not need to be trashed, so
+ * only new blocks from this write should be trashed. For blocks within
+ * EOF, generic_write_end() zeros them so they are safe to leave alone and be
+ * written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
@@ -1640,8 +1676,11 @@ xfs_vm_write_end(
 		loff_t		to = pos + len;
 
 		if (to > isize) {
-			truncate_pagecache(inode, isize);
+			/* only kill blocks in this write beyond EOF */
+			if (pos > isize)
+				isize = pos;
 			xfs_vm_kill_delalloc_range(inode, isize, to);
+			truncate_pagecache_range(inode, isize, to);
 		}
 	}
 	return ret;
......
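
Both failure paths above now compute the same range: from max(pos, isize) to the end of the failed write, so data from earlier successful writes beyond EOF survives. A small sketch of the range calculation, with made-up offsets:

    /* Sketch of the failed-write truncation range; offsets hypothetical.
     * Only page cache from *this* write beyond EOF is discarded. */
    #include <stdio.h>

    int main(void)
    {
        long long isize = 8192;   /* EOF before the failed write */
        long long pos = 4096;     /* the failed write straddles EOF */
        long long len = 8192;

        if (pos + len > isize) {
            /* max(pos, isize), as max_t() computes in the fix */
            long long start = pos > isize ? pos : isize;

            /* the old code truncated everything from isize, throwing
             * away previously written data; the fix only removes
             * [start, pos + len) */
            printf("truncate [%lld, %lld)\n", start, pos + len);
        }
        return 0;
    }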
@@ -5413,6 +5413,7 @@ xfs_bmap_shift_extents(
 	int			whichfork = XFS_DATA_FORK;
 	int			logflags;
 	xfs_filblks_t		blockcount = 0;
+	int			total_extents;
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -5429,7 +5430,6 @@ xfs_bmap_shift_extents(
 	ASSERT(current_ext != NULL);
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
-
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 		/* Read in all the extents */
 		error = xfs_iread_extents(tp, ip, whichfork);
@@ -5456,7 +5456,6 @@ xfs_bmap_shift_extents(
 
 	/* We are going to change core inode */
 	logflags = XFS_ILOG_CORE;
-
 	if (ifp->if_flags & XFS_IFBROOT) {
 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 		cur->bc_private.b.firstblock = *firstblock;
@@ -5467,8 +5466,14 @@ xfs_bmap_shift_extents(
 		logflags |= XFS_ILOG_DEXT;
 	}
 
-	while (nexts++ < num_exts &&
-	       *current_ext < XFS_IFORK_NEXTENTS(ip, whichfork)) {
+	/*
+	 * There may be delalloc extents in the data fork before the range we
+	 * are collapsing out, so we cannot use the count of real extents here.
+	 * Instead we have to calculate it from the incore fork.
+	 */
+	total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+	while (nexts++ < num_exts && *current_ext < total_extents) {
 		gotp = xfs_iext_get_ext(ifp, *current_ext);
 		xfs_bmbt_get_all(gotp, &got);
@@ -5556,10 +5561,11 @@ xfs_bmap_shift_extents(
 		}
 
 		(*current_ext)++;
+		total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
 	}
 
 	/* Check if we are done */
-	if (*current_ext == XFS_IFORK_NEXTENTS(ip, whichfork))
+	if (*current_ext == total_extents)
 		*done = 1;
 
 del_cursor:
@@ -5568,6 +5574,5 @@ xfs_bmap_shift_extents(
 			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 
 	xfs_trans_log_inode(tp, ip, logflags);
-
 	return error;
 }
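
The loop-bound change matters because the on-disk extent count does not include delalloc extents, while the incore extent list does; dividing the list's byte length by the record size counts both. A toy model of the mismatch (types and counts hypothetical, not XFS structures):

    /* Toy model of the loop-bound fix: the on-disk extent count misses
     * delalloc extents, but the incore list's byte length counts them. */
    #include <stdio.h>

    struct rec { long long startoff, blockcount; }; /* stand-in for xfs_bmbt_rec_t */

    int main(void)
    {
        struct rec incore[4] = {
            {  0, 8 },  /* delalloc: exists only in the incore list */
            { 16, 8 },  /* real */
            { 32, 8 },  /* real */
            { 48, 8 },  /* real */
        };
        int if_bytes = sizeof(incore);  /* what ifp->if_bytes models */
        int nextents_ondisk = 3;        /* what XFS_IFORK_NEXTENTS would say */

        int total_extents = if_bytes / sizeof(struct rec);

        /* bounding the shift loop by the on-disk count would stop one
         * extent short and mis-detect "done" */
        printf("on-disk count %d, incore count %d\n",
               nextents_ondisk, total_extents);
        return 0;
    }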
@@ -1418,6 +1418,8 @@ xfs_zero_file_space(
 	xfs_off_t		end_boundary;
 	int			error;
 
+	trace_xfs_zero_file_space(ip);
+
 	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
 
 	/*
@@ -1432,9 +1434,18 @@ xfs_zero_file_space(
 	ASSERT(end_boundary <= offset + len);
 
 	if (start_boundary < end_boundary - 1) {
-		/* punch out the page cache over the conversion range */
+		/*
+		 * punch out delayed allocation blocks and the page cache over
+		 * the conversion range
+		 */
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		error = xfs_bmap_punch_delalloc_range(ip,
+				XFS_B_TO_FSBT(mp, start_boundary),
+				XFS_B_TO_FSB(mp, end_boundary - start_boundary));
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		truncate_pagecache_range(VFS_I(ip), start_boundary,
 					 end_boundary - 1);
 
 		/* convert the blocks */
 		error = xfs_alloc_file_space(ip, start_boundary,
 					     end_boundary - start_boundary - 1,
......
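
For reference, a sketch of how the punched range is bounded, assuming start_boundary rounds offset up and end_boundary rounds offset + len down to the granularity (as the surrounding code and the ASSERTs imply); the sub-granularity edges are zeroed separately by the caller:

    /* Sketch of the zero-range boundaries; inputs hypothetical. The
     * interior is punched and reallocated unwritten, the edges are not. */
    #include <stdio.h>

    int main(void)
    {
        long long granularity = 4096;   /* max(block size, page size) */
        long long offset = 3000, len = 20000;

        long long start_boundary = ((offset + granularity - 1) / granularity)
                                        * granularity;  /* round up:   4096 */
        long long end_boundary = ((offset + len) / granularity)
                                        * granularity;  /* round down: 20480 */

        if (start_boundary < end_boundary - 1)
            printf("punch delalloc + page cache over [%lld, %lld)\n",
                   start_boundary, end_boundary);
        return 0;
    }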
@@ -1372,21 +1372,29 @@ xfs_buf_iorequest(
 		xfs_buf_wait_unpin(bp);
 	xfs_buf_hold(bp);
 
-	/* Set the count to 1 initially, this will stop an I/O
+	/*
+	 * Set the count to 1 initially, this will stop an I/O
 	 * completion callout which happens before we have started
 	 * all the I/O from calling xfs_buf_ioend too early.
 	 */
 	atomic_set(&bp->b_io_remaining, 1);
 	_xfs_buf_ioapply(bp);
-	_xfs_buf_ioend(bp, 1);
+	/*
+	 * If _xfs_buf_ioapply failed, we'll get back here with
+	 * only the reference we took above. _xfs_buf_ioend will
+	 * drop it to zero, so we'd better not queue it for later,
+	 * or we'll free it before it's done.
+	 */
+	_xfs_buf_ioend(bp, bp->b_error ? 0 : 1);
 
 	xfs_buf_rele(bp);
 }
 
 /*
  * Waits for I/O to complete on the buffer supplied. It returns immediately if
- * no I/O is pending or there is already a pending error on the buffer. It
- * returns the I/O error code, if any, or 0 if there was no error.
+ * no I/O is pending or there is already a pending error on the buffer, in which
+ * case nothing will ever complete. It returns the I/O error code, if any, or
+ * 0 if there was no error.
 */
int
xfs_buf_iowait(
......
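
The b_io_remaining logic is a common submission-side guard: hold one artificial completion reference while the I/O is being issued, then drop it. A rough userspace model using C11 atomics (the deferral flag and names are illustrative, not the kernel API):

    /* Model of the completion-count pattern xfs_buf_iorequest relies on.
     * The initial count of 1 stops early bio completions from finishing
     * the buffer before submission is done. On submission error, that
     * guard is the last reference, so completion must run synchronously
     * (the "? 0 : 1" in the fix) rather than be queued for later. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int io_remaining;

    static void ioend(bool may_defer)
    {
        /* last decrement completes the buffer */
        if (atomic_fetch_sub(&io_remaining, 1) == 1)
            printf("buffer done (deferred to workqueue: %d)\n", may_defer);
    }

    int main(void)
    {
        bool submit_error = true;       /* pretend _xfs_buf_ioapply failed */

        atomic_store(&io_remaining, 1); /* guard ref held over submission */
        /* ... each issued bio would atomic_fetch_add(&io_remaining, 1)
         * and call ioend(true) from its completion handler ... */
        ioend(submit_error ? false : true); /* drop the guard ref */
        return 0;
    }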
@@ -679,7 +679,7 @@ xfs_file_dio_aio_write(
 		goto out;
 
 	if (mapping->nrpages) {
-		ret = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 						    pos, -1);
 		if (ret)
 			goto out;
......
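
The one-character fix reflects the sign convention at this boundary: filemap_write_and_wait_range() already returns a negative errno, and the value propagates back to the caller as-is, so the extra negation turned failures into bogus positive values. A trivial demonstration with a stub:

    /* Sketch of the sign bug with a stub standing in for
     * filemap_write_and_wait_range(), which returns a negative errno. */
    #include <stdio.h>

    static int filemap_write_and_wait_range_stub(void)
    {
        return -5;  /* -EIO, as the real function would return */
    }

    int main(void)
    {
        int buggy = -filemap_write_and_wait_range_stub(); /* +5: wrong sign */
        int fixed = filemap_write_and_wait_range_stub();  /* -5: correct */

        printf("buggy ret = %d, fixed ret = %d\n", buggy, fixed);
        return 0;
    }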
@@ -1334,7 +1334,8 @@ int
 xfs_create_tmpfile(
 	struct xfs_inode	*dp,
 	struct dentry		*dentry,
-	umode_t			mode)
+	umode_t			mode,
+	struct xfs_inode	**ipp)
 {
 	struct xfs_mount	*mp = dp->i_mount;
 	struct xfs_inode	*ip = NULL;
@@ -1402,7 +1403,6 @@ xfs_create_tmpfile(
 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 
 	ip->i_d.di_nlink--;
-	d_tmpfile(dentry, VFS_I(ip));
 	error = xfs_iunlink(tp, ip);
 	if (error)
 		goto out_trans_abort;
@@ -1415,6 +1415,7 @@ xfs_create_tmpfile(
 	xfs_qm_dqrele(gdqp);
 	xfs_qm_dqrele(pdqp);
 
+	*ipp = ip;
 	return 0;
 
 out_trans_abort:
......
@@ -334,7 +334,7 @@ int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
 int	xfs_create(struct xfs_inode *dp, struct xfs_name *name,
 		   umode_t mode, xfs_dev_t rdev, struct xfs_inode **ipp);
 int	xfs_create_tmpfile(struct xfs_inode *dp, struct dentry *dentry,
-		   umode_t mode);
+		   umode_t mode, struct xfs_inode **ipp);
 int	xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
 		   struct xfs_inode *ip);
 int	xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
......
@@ -1053,11 +1053,25 @@ xfs_vn_tmpfile(
 	struct dentry	*dentry,
 	umode_t		mode)
 {
 	int			error;
+	struct xfs_inode	*ip;
+	struct inode		*inode;
 
-	error = xfs_create_tmpfile(XFS_I(dir), dentry, mode);
+	error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+	if (unlikely(error))
+		return -error;
 
-	return -error;
+	inode = VFS_I(ip);
+
+	error = xfs_init_security(inode, dir, &dentry->d_name);
+	if (unlikely(error)) {
+		iput(inode);
+		return -error;
+	}
+
+	d_tmpfile(dentry, inode);
+	return 0;
 }
 
 static const struct inode_operations xfs_inode_operations = {
......
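
Taken together, the two tmpfile hunks move dentry instantiation out of xfs_create_tmpfile() and into the VFS entry point, after xfs_init_security() has run — which is what resolves the deadlock and the missing security context named in the merge text. A hypothetical userspace sketch of that ordering (stub types and functions, not the kernel API):

    /* Sketch of the tmpfile ordering: label the inode before anything
     * can see it through the dentry. All names here are stubs. */
    #include <stdio.h>

    struct toy_inode { int sec_ready; };

    static int init_security(struct toy_inode *inode)
    {
        inode->sec_ready = 1;  /* e.g. SELinux labels the inode here */
        return 0;
    }

    static void instantiate_dentry(struct toy_inode *inode)
    {
        /* stands in for d_tmpfile(): only now is the inode visible */
        printf("instantiated, security ready: %d\n", inode->sec_ready);
    }

    int main(void)
    {
        struct toy_inode inode = { 0 };

        /* old order: instantiation happened inside xfs_create_tmpfile(),
         * before any security init; new order: create, init, instantiate */
        if (init_security(&inode))
            return 1;  /* on failure, drop the unlinked inode (iput) */
        instantiate_dentry(&inode);
        return 0;
    }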
@@ -1181,11 +1181,14 @@ xlog_iodone(xfs_buf_t *bp)
 	/* log I/O is always issued ASYNC */
 	ASSERT(XFS_BUF_ISASYNC(bp));
 	xlog_state_done_syncing(iclog, aborted);
+
 	/*
-	 * do not reference the buffer (bp) here as we could race
-	 * with it being freed after writing the unmount record to the
-	 * log.
+	 * drop the buffer lock now that we are done. Nothing references
+	 * the buffer after this, so an unmount waiting on this lock can now
+	 * tear it down safely. As such, it is unsafe to reference the buffer
+	 * (bp) after the unlock as we could race with it being freed.
 	 */
+	xfs_buf_unlock(bp);
 }
 
 /*
@@ -1368,8 +1371,16 @@ xlog_alloc_log(
 	bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
 	if (!bp)
 		goto out_free_log;
-	bp->b_iodone = xlog_iodone;
+
+	/*
+	 * The iclogbuf buffer locks are held over IO but we are not going to
+	 * do IO yet. Hence unlock the buffer so that the log IO path can grab
+	 * it when appropriate.
+	 */
 	ASSERT(xfs_buf_islocked(bp));
+	xfs_buf_unlock(bp);
+
+	bp->b_iodone = xlog_iodone;
 	log->l_xbuf = bp;
 
 	spin_lock_init(&log->l_icloglock);
@@ -1398,6 +1409,9 @@ xlog_alloc_log(
 		if (!bp)
 			goto out_free_iclog;
 
+		ASSERT(xfs_buf_islocked(bp));
+		xfs_buf_unlock(bp);
+
 		bp->b_iodone = xlog_iodone;
 		iclog->ic_bp = bp;
 		iclog->ic_data = bp->b_addr;
@@ -1422,7 +1436,6 @@ xlog_alloc_log(
 		iclog->ic_callback_tail = &(iclog->ic_callback);
 		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
 
-		ASSERT(xfs_buf_islocked(iclog->ic_bp));
 		init_waitqueue_head(&iclog->ic_force_wait);
 		init_waitqueue_head(&iclog->ic_write_wait);
 
@@ -1631,6 +1644,12 @@ xlog_cksum(
 * we transition the iclogs to IOERROR state *after* flushing all existing
 * iclogs to disk. This is because we don't want any more new transactions to be
 * started or completed afterwards.
+ *
+ * We lock the iclogbufs here so that we can serialise against IO completion
+ * during unmount. We might be processing a shutdown triggered during unmount,
+ * and that can occur asynchronously to the unmount thread, and hence we need
+ * to ensure that it completes before tearing down the iclogbufs. Hence we
+ * need to hold the buffer lock across the log IO to achieve that.
 */
STATIC int
xlog_bdstrat(
@@ -1638,6 +1657,7 @@ xlog_bdstrat(
{
 	struct xlog_in_core	*iclog = bp->b_fspriv;
 
+	xfs_buf_lock(bp);
 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
 		xfs_buf_ioerror(bp, EIO);
 		xfs_buf_stale(bp);
@@ -1645,7 +1665,8 @@ xlog_bdstrat(
 		/*
 		 * It would seem logical to return EIO here, but we rely on
 		 * the log state machine to propagate I/O errors instead of
-		 * doing it here.
+		 * doing it here. Similarly, IO completion will unlock the
+		 * buffer, so we don't do it here.
 		 */
 		return 0;
 	}
@@ -1847,14 +1868,28 @@ xlog_dealloc_log(
 	xlog_cil_destroy(log);
 
 	/*
-	 * always need to ensure that the extra buffer does not point to memory
-	 * owned by another log buffer before we free it.
+	 * Cycle all the iclogbuf locks to make sure all log IO completion
+	 * is done before we tear down these buffers.
+	 */
+	iclog = log->l_iclog;
+	for (i = 0; i < log->l_iclog_bufs; i++) {
+		xfs_buf_lock(iclog->ic_bp);
+		xfs_buf_unlock(iclog->ic_bp);
+		iclog = iclog->ic_next;
+	}
+
+	/*
+	 * Always need to ensure that the extra buffer does not point to memory
+	 * owned by another log buffer before we free it. Also, cycle the lock
+	 * first to ensure we've completed IO on it.
 	 */
+	xfs_buf_lock(log->l_xbuf);
+	xfs_buf_unlock(log->l_xbuf);
 	xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
 	xfs_buf_free(log->l_xbuf);
 
 	iclog = log->l_iclog;
-	for (i=0; i<log->l_iclog_bufs; i++) {
+	for (i = 0; i < log->l_iclog_bufs; i++) {
 		xfs_buf_free(iclog->ic_bp);
 		next_iclog = iclog->ic_next;
 		kmem_free(iclog);
......
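
The unmount fix hinges on one idiom: hold the iclog buffer lock across the IO, then have teardown "cycle" the lock (lock, then immediately unlock) as a barrier that cannot complete until IO completion has dropped it. A minimal pthread sketch of that barrier (names and timing hypothetical):

    /* Sketch of the lock-cycling barrier used by xlog_dealloc_log. The
     * IO side holds the lock for the duration of the IO; teardown cycles
     * it, which blocks until the IO side is finished. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static sem_t started;

    static void *io_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&buf_lock);  /* held across the whole IO */
        sem_post(&started);             /* tell main the IO is in flight */
        usleep(10000);                  /* the IO itself */
        puts("io complete");
        pthread_mutex_unlock(&buf_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&started, 0, 0);
        pthread_create(&t, NULL, io_path, NULL);
        sem_wait(&started);             /* IO in flight, lock held */

        /* teardown: cycle the lock; this cannot succeed until the IO
         * side drops it, so the buffer is safe to free afterwards */
        pthread_mutex_lock(&buf_lock);
        pthread_mutex_unlock(&buf_lock);
        puts("safe to tear down buffer");

        pthread_join(t, NULL);
        return 0;
    }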
@@ -603,6 +603,7 @@ DEFINE_INODE_EVENT(xfs_readlink);
 DEFINE_INODE_EVENT(xfs_inactive_symlink);
 DEFINE_INODE_EVENT(xfs_alloc_file_space);
 DEFINE_INODE_EVENT(xfs_free_file_space);
+DEFINE_INODE_EVENT(xfs_zero_file_space);
 DEFINE_INODE_EVENT(xfs_collapse_file_space);
 DEFINE_INODE_EVENT(xfs_readdir);
 
 #ifdef CONFIG_XFS_POSIX_ACL
......