Commit 9978306e authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

Pull XFS update from Ben Myers:

 - Removal of xfsbufd
 - Background CIL flushes have been moved to a workqueue.
 - Fix to xfs_check_page_type applicable to filesystems where
   blocksize < page size
 - Fix for stale data exposure when extsize hints are used.
 - A series of xfs_buf cache cleanups.
 - Fix for XFS_IOC_ALLOCSP
 - Cleanups for includes and removal of xfs_lrw.[ch].
 - Moved all busy extent handling to its own file so that it is easier
   to merge with userspace.
 - Fix for log mount failure.
 - Fix to enable inode reclaim during quotacheck at mount time.
 - Fix for delalloc quota accounting.
 - Fix for memory reclaim deadlock on agi buffer.
 - Fixes for failed writes and to clean up stale delalloc blocks.
 - Fix to use GFP_NOFS in blkdev_issue_flush
 - SEEK_DATA/SEEK_HOLE support

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (57 commits)
  xfs: add trace points for log forces
  xfs: fix memory reclaim deadlock on agi buffer
  xfs: fix delalloc quota accounting on failure
  xfs: protect xfs_sync_worker with s_umount semaphore
  xfs: introduce SEEK_DATA/SEEK_HOLE support
  xfs: make xfs_extent_busy_trim not static
  xfs: make XBF_MAPPED the default behaviour
  xfs: flush outstanding buffers on log mount failure
  xfs: Properly exclude IO type flags from buffer flags
  xfs: clean up xfs_bit.h includes
  xfs: move xfs_do_force_shutdown() and kill xfs_rw.c
  xfs: move xfs_get_extsz_hint() and kill xfs_rw.h
  xfs: move xfs_fsb_to_db to xfs_bmap.h
  xfs: clean up busy extent naming
  xfs: move busy extent handling to it's own file
  xfs: move xfsagino_t to xfs_types.h
  xfs: use iolock on XFS_IOC_ALLOCSP calls
  xfs: kill XBF_DONTBLOCK
  xfs: kill xfs_read_buf()
  xfs: kill XBF_LOCK
  ...
parents abe81e25 14c26c6a
...@@ -7623,7 +7623,7 @@ XFS FILESYSTEM ...@@ -7623,7 +7623,7 @@ XFS FILESYSTEM
P: Silicon Graphics Inc P: Silicon Graphics Inc
M: Ben Myers <bpm@sgi.com> M: Ben Myers <bpm@sgi.com>
M: Alex Elder <elder@kernel.org> M: Alex Elder <elder@kernel.org>
M: xfs-masters@oss.sgi.com M: xfs@oss.sgi.com
L: xfs@oss.sgi.com L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs W: http://oss.sgi.com/projects/xfs
T: git git://oss.sgi.com/xfs/xfs.git T: git git://oss.sgi.com/xfs/xfs.git
......
...@@ -33,6 +33,7 @@ xfs-y += xfs_aops.o \ ...@@ -33,6 +33,7 @@ xfs-y += xfs_aops.o \
xfs_discard.o \ xfs_discard.o \
xfs_error.o \ xfs_error.o \
xfs_export.o \ xfs_export.o \
xfs_extent_busy.o \
xfs_file.o \ xfs_file.o \
xfs_filestream.o \ xfs_filestream.o \
xfs_fsops.o \ xfs_fsops.o \
...@@ -49,7 +50,6 @@ xfs-y += xfs_aops.o \ ...@@ -49,7 +50,6 @@ xfs-y += xfs_aops.o \
xfs_sync.o \ xfs_sync.o \
xfs_xattr.o \ xfs_xattr.o \
xfs_rename.o \ xfs_rename.o \
xfs_rw.o \
xfs_utils.o \ xfs_utils.o \
xfs_vnodeops.o \ xfs_vnodeops.o \
kmem.o \ kmem.o \
......
...@@ -174,24 +174,6 @@ typedef struct xfs_agfl { ...@@ -174,24 +174,6 @@ typedef struct xfs_agfl {
__be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */ __be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */
} xfs_agfl_t; } xfs_agfl_t;
/*
* Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
* have been freed but whose transactions aren't committed to disk yet.
*
* Note that we use the transaction ID to record the transaction, not the
* transaction structure itself. See xfs_alloc_busy_insert() for details.
*/
struct xfs_busy_extent {
struct rb_node rb_node; /* ag by-bno indexed search tree */
struct list_head list; /* transaction busy extent list */
xfs_agnumber_t agno;
xfs_agblock_t bno;
xfs_extlen_t length;
unsigned int flags;
#define XFS_ALLOC_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
#define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02 /* do not discard */
};
/* /*
* Per-ag incore structure, copies of information in agf and agi, * Per-ag incore structure, copies of information in agf and agi,
* to improve the performance of allocation group selection. * to improve the performance of allocation group selection.
......
This diff is collapsed.
...@@ -23,7 +23,6 @@ struct xfs_btree_cur; ...@@ -23,7 +23,6 @@ struct xfs_btree_cur;
struct xfs_mount; struct xfs_mount;
struct xfs_perag; struct xfs_perag;
struct xfs_trans; struct xfs_trans;
struct xfs_busy_extent;
extern struct workqueue_struct *xfs_alloc_wq; extern struct workqueue_struct *xfs_alloc_wq;
...@@ -139,33 +138,6 @@ xfs_extlen_t ...@@ -139,33 +138,6 @@ xfs_extlen_t
xfs_alloc_longest_free_extent(struct xfs_mount *mp, xfs_alloc_longest_free_extent(struct xfs_mount *mp,
struct xfs_perag *pag); struct xfs_perag *pag);
#ifdef __KERNEL__
void
xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
void
xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
bool do_discard);
int
xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_extlen_t len);
void
xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
int
xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
static inline void xfs_alloc_busy_sort(struct list_head *list)
{
list_sort(NULL, list, xfs_busy_extent_ag_cmp);
}
#endif /* __KERNEL__ */
/* /*
* Compute and fill in value of m_ag_maxlevels. * Compute and fill in value of m_ag_maxlevels.
*/ */
......
...@@ -18,9 +18,7 @@ ...@@ -18,9 +18,7 @@
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -32,6 +30,7 @@ ...@@ -32,6 +30,7 @@
#include "xfs_inode.h" #include "xfs_inode.h"
#include "xfs_btree.h" #include "xfs_btree.h"
#include "xfs_alloc.h" #include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h" #include "xfs_error.h"
#include "xfs_trace.h" #include "xfs_trace.h"
...@@ -94,7 +93,7 @@ xfs_allocbt_alloc_block( ...@@ -94,7 +93,7 @@ xfs_allocbt_alloc_block(
return 0; return 0;
} }
xfs_alloc_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false); xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
xfs_trans_agbtree_delta(cur->bc_tp, 1); xfs_trans_agbtree_delta(cur->bc_tp, 1);
new->s = cpu_to_be32(bno); new->s = cpu_to_be32(bno);
...@@ -119,8 +118,8 @@ xfs_allocbt_free_block( ...@@ -119,8 +118,8 @@ xfs_allocbt_free_block(
if (error) if (error)
return error; return error;
xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1, xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
XFS_ALLOC_BUSY_SKIP_DISCARD); XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_trans_agbtree_delta(cur->bc_tp, -1); xfs_trans_agbtree_delta(cur->bc_tp, -1);
return 0; return 0;
} }
......
...@@ -16,9 +16,7 @@ ...@@ -16,9 +16,7 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/ */
#include "xfs.h" #include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
#include "xfs_trans.h" #include "xfs_trans.h"
...@@ -29,7 +27,6 @@ ...@@ -29,7 +27,6 @@
#include "xfs_inode_item.h" #include "xfs_inode_item.h"
#include "xfs_alloc.h" #include "xfs_alloc.h"
#include "xfs_error.h" #include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h" #include "xfs_iomap.h"
#include "xfs_vnodeops.h" #include "xfs_vnodeops.h"
#include "xfs_trace.h" #include "xfs_trace.h"
...@@ -623,7 +620,7 @@ xfs_map_at_offset( ...@@ -623,7 +620,7 @@ xfs_map_at_offset(
* or delayed allocate extent. * or delayed allocate extent.
*/ */
STATIC int STATIC int
xfs_is_delayed_page( xfs_check_page_type(
struct page *page, struct page *page,
unsigned int type) unsigned int type)
{ {
...@@ -637,11 +634,11 @@ xfs_is_delayed_page( ...@@ -637,11 +634,11 @@ xfs_is_delayed_page(
bh = head = page_buffers(page); bh = head = page_buffers(page);
do { do {
if (buffer_unwritten(bh)) if (buffer_unwritten(bh))
acceptable = (type == IO_UNWRITTEN); acceptable += (type == IO_UNWRITTEN);
else if (buffer_delay(bh)) else if (buffer_delay(bh))
acceptable = (type == IO_DELALLOC); acceptable += (type == IO_DELALLOC);
else if (buffer_dirty(bh) && buffer_mapped(bh)) else if (buffer_dirty(bh) && buffer_mapped(bh))
acceptable = (type == IO_OVERWRITE); acceptable += (type == IO_OVERWRITE);
else else
break; break;
} while ((bh = bh->b_this_page) != head); } while ((bh = bh->b_this_page) != head);
...@@ -684,7 +681,7 @@ xfs_convert_page( ...@@ -684,7 +681,7 @@ xfs_convert_page(
goto fail_unlock_page; goto fail_unlock_page;
if (page->mapping != inode->i_mapping) if (page->mapping != inode->i_mapping)
goto fail_unlock_page; goto fail_unlock_page;
if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) if (!xfs_check_page_type(page, (*ioendp)->io_type))
goto fail_unlock_page; goto fail_unlock_page;
/* /*
...@@ -834,7 +831,7 @@ xfs_aops_discard_page( ...@@ -834,7 +831,7 @@ xfs_aops_discard_page(
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
loff_t offset = page_offset(page); loff_t offset = page_offset(page);
if (!xfs_is_delayed_page(page, IO_DELALLOC)) if (!xfs_check_page_type(page, IO_DELALLOC))
goto out_invalidate; goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) if (XFS_FORCED_SHUTDOWN(ip->i_mount))
...@@ -1146,7 +1143,14 @@ __xfs_get_blocks( ...@@ -1146,7 +1143,14 @@ __xfs_get_blocks(
if (!create && direct && offset >= i_size_read(inode)) if (!create && direct && offset >= i_size_read(inode))
return 0; return 0;
if (create) { /*
* Direct I/O is usually done on preallocated files, so try getting
* a block mapping without an exclusive lock first. For buffered
* writes we already have the exclusive iolock anyway, so avoiding
* a lock roundtrip here by taking the ilock exclusive from the
* beginning is a useful micro optimization.
*/
if (create && !direct) {
lockmode = XFS_ILOCK_EXCL; lockmode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lockmode); xfs_ilock(ip, lockmode);
} else { } else {
...@@ -1168,23 +1172,45 @@ __xfs_get_blocks( ...@@ -1168,23 +1172,45 @@ __xfs_get_blocks(
(!nimaps || (!nimaps ||
(imap.br_startblock == HOLESTARTBLOCK || (imap.br_startblock == HOLESTARTBLOCK ||
imap.br_startblock == DELAYSTARTBLOCK))) { imap.br_startblock == DELAYSTARTBLOCK))) {
if (direct) { if (direct || xfs_get_extsz_hint(ip)) {
/*
* Drop the ilock in preparation for starting the block
* allocation transaction. It will be retaken
* exclusively inside xfs_iomap_write_direct for the
* actual allocation.
*/
xfs_iunlock(ip, lockmode);
error = xfs_iomap_write_direct(ip, offset, size, error = xfs_iomap_write_direct(ip, offset, size,
&imap, nimaps); &imap, nimaps);
if (error)
return -error;
new = 1;
} else { } else {
/*
* Delalloc reservations do not require a transaction,
* we can go on without dropping the lock here. If we
* are allocating a new delalloc block, make sure that
* we set the new flag so that we mark the buffer new so
* that we know that it is newly allocated if the write
* fails.
*/
if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
new = 1;
error = xfs_iomap_write_delay(ip, offset, size, &imap); error = xfs_iomap_write_delay(ip, offset, size, &imap);
if (error)
goto out_unlock;
xfs_iunlock(ip, lockmode);
} }
if (error)
goto out_unlock;
trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
} else if (nimaps) { } else if (nimaps) {
trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
xfs_iunlock(ip, lockmode);
} else { } else {
trace_xfs_get_blocks_notfound(ip, offset, size); trace_xfs_get_blocks_notfound(ip, offset, size);
goto out_unlock; goto out_unlock;
} }
xfs_iunlock(ip, lockmode);
if (imap.br_startblock != HOLESTARTBLOCK && if (imap.br_startblock != HOLESTARTBLOCK &&
imap.br_startblock != DELAYSTARTBLOCK) { imap.br_startblock != DELAYSTARTBLOCK) {
...@@ -1386,52 +1412,91 @@ xfs_vm_direct_IO( ...@@ -1386,52 +1412,91 @@ xfs_vm_direct_IO(
return ret; return ret;
} }
/*
* Punch out the delalloc blocks we have already allocated.
*
* Don't bother with xfs_setattr given that nothing can have made it to disk yet
* as the page is still locked at this point.
*/
STATIC void
xfs_vm_kill_delalloc_range(
struct inode *inode,
loff_t start,
loff_t end)
{
struct xfs_inode *ip = XFS_I(inode);
xfs_fileoff_t start_fsb;
xfs_fileoff_t end_fsb;
int error;
start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
if (end_fsb <= start_fsb)
return;
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
end_fsb - start_fsb);
if (error) {
/* something screwed, just bail */
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_alert(ip->i_mount,
"xfs_vm_write_failed: unable to clean up ino %lld",
ip->i_ino);
}
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
STATIC void STATIC void
xfs_vm_write_failed( xfs_vm_write_failed(
struct address_space *mapping, struct inode *inode,
loff_t to) struct page *page,
loff_t pos,
unsigned len)
{ {
struct inode *inode = mapping->host; loff_t block_offset = pos & PAGE_MASK;
loff_t block_start;
loff_t block_end;
loff_t from = pos & (PAGE_CACHE_SIZE - 1);
loff_t to = from + len;
struct buffer_head *bh, *head;
if (to > inode->i_size) { ASSERT(block_offset + from == pos);
/*
* Punch out the delalloc blocks we have already allocated.
*
* Don't bother with xfs_setattr given that nothing can have
* made it to disk yet as the page is still locked at this
* point.
*/
struct xfs_inode *ip = XFS_I(inode);
xfs_fileoff_t start_fsb;
xfs_fileoff_t end_fsb;
int error;
truncate_pagecache(inode, to, inode->i_size); head = page_buffers(page);
block_start = 0;
for (bh = head; bh != head || !block_start;
bh = bh->b_this_page, block_start = block_end,
block_offset += bh->b_size) {
block_end = block_start + bh->b_size;
/* /* skip buffers before the write */
* Check if there are any blocks that are outside of i_size if (block_end <= from)
* that need to be trimmed back. continue;
*/
start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1; /* if the buffer is after the write, we're done */
end_fsb = XFS_B_TO_FSB(ip->i_mount, to); if (block_start >= to)
if (end_fsb <= start_fsb) break;
return;
if (!buffer_delay(bh))
xfs_ilock(ip, XFS_ILOCK_EXCL); continue;
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
end_fsb - start_fsb); if (!buffer_new(bh) && block_offset < i_size_read(inode))
if (error) { continue;
/* something screwed, just bail */
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_vm_kill_delalloc_range(inode, block_offset,
xfs_alert(ip->i_mount, block_offset + bh->b_size);
"xfs_vm_write_failed: unable to clean up ino %lld",
ip->i_ino);
}
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
} }
} }
/*
* This used to call block_write_begin(), but it unlocks and releases the page
* on error, and we need that page to be able to punch stale delalloc blocks out
* on failure. hence we copy-n-waste it here and call xfs_vm_write_failed() at
* the appropriate point.
*/
STATIC int STATIC int
xfs_vm_write_begin( xfs_vm_write_begin(
struct file *file, struct file *file,
...@@ -1442,15 +1507,40 @@ xfs_vm_write_begin( ...@@ -1442,15 +1507,40 @@ xfs_vm_write_begin(
struct page **pagep, struct page **pagep,
void **fsdata) void **fsdata)
{ {
int ret; pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page;
int status;
ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, ASSERT(len <= PAGE_CACHE_SIZE);
pagep, xfs_get_blocks);
if (unlikely(ret)) page = grab_cache_page_write_begin(mapping, index,
xfs_vm_write_failed(mapping, pos + len); flags | AOP_FLAG_NOFS);
return ret; if (!page)
return -ENOMEM;
status = __block_write_begin(page, pos, len, xfs_get_blocks);
if (unlikely(status)) {
struct inode *inode = mapping->host;
xfs_vm_write_failed(inode, page, pos, len);
unlock_page(page);
if (pos + len > i_size_read(inode))
truncate_pagecache(inode, pos + len, i_size_read(inode));
page_cache_release(page);
page = NULL;
}
*pagep = page;
return status;
} }
/*
* On failure, we only need to kill delalloc blocks beyond EOF because they
* will never be written. For blocks within EOF, generic_write_end() zeros them
* so they are safe to leave alone and be written with all the other valid data.
*/
STATIC int STATIC int
xfs_vm_write_end( xfs_vm_write_end(
struct file *file, struct file *file,
...@@ -1463,9 +1553,19 @@ xfs_vm_write_end( ...@@ -1463,9 +1553,19 @@ xfs_vm_write_end(
{ {
int ret; int ret;
ASSERT(len <= PAGE_CACHE_SIZE);
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (unlikely(ret < len)) if (unlikely(ret < len)) {
xfs_vm_write_failed(mapping, pos + len); struct inode *inode = mapping->host;
size_t isize = i_size_read(inode);
loff_t to = pos + len;
if (to > isize) {
truncate_pagecache(inode, to, isize);
xfs_vm_kill_delalloc_range(inode, isize, to);
}
}
return ret; return ret;
} }
......
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -39,7 +38,6 @@ ...@@ -39,7 +38,6 @@
#include "xfs_error.h" #include "xfs_error.h"
#include "xfs_quota.h" #include "xfs_quota.h"
#include "xfs_trans_space.h" #include "xfs_trans_space.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h" #include "xfs_vnodeops.h"
#include "xfs_trace.h" #include "xfs_trace.h"
...@@ -1987,14 +1985,12 @@ xfs_attr_rmtval_get(xfs_da_args_t *args) ...@@ -1987,14 +1985,12 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
(map[i].br_startblock != HOLESTARTBLOCK)); (map[i].br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
blkcnt, XBF_LOCK | XBF_DONT_BLOCK, dblkno, blkcnt, 0, &bp);
&bp);
if (error) if (error)
return(error); return(error);
tmp = (valuelen < XFS_BUF_SIZE(bp)) tmp = min_t(int, valuelen, BBTOB(bp->b_length));
? valuelen : XFS_BUF_SIZE(bp);
xfs_buf_iomove(bp, 0, tmp, dst, XBRW_READ); xfs_buf_iomove(bp, 0, tmp, dst, XBRW_READ);
xfs_buf_relse(bp); xfs_buf_relse(bp);
dst += tmp; dst += tmp;
...@@ -2097,6 +2093,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) ...@@ -2097,6 +2093,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
lblkno = args->rmtblkno; lblkno = args->rmtblkno;
valuelen = args->valuelen; valuelen = args->valuelen;
while (valuelen > 0) { while (valuelen > 0) {
int buflen;
/* /*
* Try to remember where we decided to put the value. * Try to remember where we decided to put the value.
*/ */
...@@ -2114,15 +2112,16 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) ...@@ -2114,15 +2112,16 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
XBF_LOCK | XBF_DONT_BLOCK);
if (!bp) if (!bp)
return ENOMEM; return ENOMEM;
tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
XFS_BUF_SIZE(bp); buflen = BBTOB(bp->b_length);
tmp = min_t(int, valuelen, buflen);
xfs_buf_iomove(bp, 0, tmp, src, XBRW_WRITE); xfs_buf_iomove(bp, 0, tmp, src, XBRW_WRITE);
if (tmp < XFS_BUF_SIZE(bp)) if (tmp < buflen)
xfs_buf_zero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); xfs_buf_zero(bp, tmp, buflen - tmp);
error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
xfs_buf_relse(bp); xfs_buf_relse(bp);
if (error) if (error)
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -2983,7 +2982,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, ...@@ -2983,7 +2982,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
map.br_blockcount); map.br_blockcount);
bp = xfs_trans_get_buf(*trans, bp = xfs_trans_get_buf(*trans,
dp->i_mount->m_ddev_targp, dp->i_mount->m_ddev_targp,
dblkno, dblkcnt, XBF_LOCK); dblkno, dblkcnt, 0);
if (!bp) if (!bp)
return ENOMEM; return ENOMEM;
xfs_trans_binval(*trans, bp); xfs_trans_binval(*trans, bp);
......
...@@ -41,7 +41,6 @@ ...@@ -41,7 +41,6 @@
#include "xfs_rtalloc.h" #include "xfs_rtalloc.h"
#include "xfs_error.h" #include "xfs_error.h"
#include "xfs_attr_leaf.h" #include "xfs_attr_leaf.h"
#include "xfs_rw.h"
#include "xfs_quota.h" #include "xfs_quota.h"
#include "xfs_trans_space.h" #include "xfs_trans_space.h"
#include "xfs_buf_item.h" #include "xfs_buf_item.h"
...@@ -4527,7 +4526,7 @@ xfs_bmapi_reserve_delalloc( ...@@ -4527,7 +4526,7 @@ xfs_bmapi_reserve_delalloc(
xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0); xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
out_unreserve_quota: out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp)) if (XFS_IS_QUOTA_ON(mp))
xfs_trans_unreserve_quota_nblks(NULL, ip, alen, 0, rt ? xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
return error; return error;
} }
...@@ -5621,8 +5620,20 @@ xfs_getbmap( ...@@ -5621,8 +5620,20 @@ xfs_getbmap(
XFS_FSB_TO_BB(mp, map[i].br_blockcount); XFS_FSB_TO_BB(mp, map[i].br_blockcount);
out[cur_ext].bmv_unused1 = 0; out[cur_ext].bmv_unused1 = 0;
out[cur_ext].bmv_unused2 = 0; out[cur_ext].bmv_unused2 = 0;
ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
(map[i].br_startblock != DELAYSTARTBLOCK)); /*
* delayed allocation extents that start beyond EOF can
* occur due to speculative EOF allocation when the
* delalloc extent is larger than the largest freespace
* extent at conversion time. These extents cannot be
* converted by data writeback, so can exist here even
* if we are not supposed to be finding delalloc
* extents.
*/
if (map[i].br_startblock == DELAYSTARTBLOCK &&
map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
ASSERT((iflags & BMV_IF_DELALLOC) != 0);
if (map[i].br_startblock == HOLESTARTBLOCK && if (map[i].br_startblock == HOLESTARTBLOCK &&
whichfork == XFS_ATTR_FORK) { whichfork == XFS_ATTR_FORK) {
/* came to the end of attribute fork */ /* came to the end of attribute fork */
...@@ -6157,3 +6168,16 @@ xfs_bmap_punch_delalloc_range( ...@@ -6157,3 +6168,16 @@ xfs_bmap_punch_delalloc_range(
return error; return error;
} }
/*
* Convert the given file system block to a disk block. We have to treat it
* differently based on whether the file is a real time file or not, because the
* bmap code does.
*/
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
return (XFS_IS_REALTIME_INODE(ip) ? \
(xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
...@@ -211,6 +211,9 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, ...@@ -211,6 +211,9 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, int *count); int whichfork, int *count);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
xfs_fileoff_t start_fsb, xfs_fileoff_t length); xfs_fileoff_t start_fsb, xfs_fileoff_t length);
xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __XFS_BMAP_H__ */ #endif /* __XFS_BMAP_H__ */
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
This diff is collapsed.
...@@ -32,11 +32,6 @@ ...@@ -32,11 +32,6 @@
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
typedef enum { typedef enum {
XBRW_READ = 1, /* transfer into target memory */ XBRW_READ = 1, /* transfer into target memory */
XBRW_WRITE = 2, /* transfer from target memory */ XBRW_WRITE = 2, /* transfer from target memory */
...@@ -46,11 +41,9 @@ typedef enum { ...@@ -46,11 +41,9 @@ typedef enum {
#define XBF_READ (1 << 0) /* buffer intended for reading from device */ #define XBF_READ (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ #define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */ #define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
/* I/O hints for the BIO layer */ /* I/O hints for the BIO layer */
#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ #define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
...@@ -58,14 +51,13 @@ typedef enum { ...@@ -58,14 +51,13 @@ typedef enum {
#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */ #define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */
/* flags used only as arguments to access routines */ /* flags used only as arguments to access routines */
#define XBF_LOCK (1 << 15)/* lock requested */
#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ #define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */
#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */ #define XBF_UNMAPPED (1 << 17)/* do not map the buffer */
/* flags used only internally */ /* flags used only internally */
#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ #define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM (1 << 21)/* backed by heap memory */ #define _XBF_KMEM (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */ #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
typedef unsigned int xfs_buf_flags_t; typedef unsigned int xfs_buf_flags_t;
...@@ -73,25 +65,18 @@ typedef unsigned int xfs_buf_flags_t; ...@@ -73,25 +65,18 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_READ, "READ" }, \ { XBF_READ, "READ" }, \
{ XBF_WRITE, "WRITE" }, \ { XBF_WRITE, "WRITE" }, \
{ XBF_READ_AHEAD, "READ_AHEAD" }, \ { XBF_READ_AHEAD, "READ_AHEAD" }, \
{ XBF_MAPPED, "MAPPED" }, \
{ XBF_ASYNC, "ASYNC" }, \ { XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \ { XBF_DONE, "DONE" }, \
{ XBF_DELWRI, "DELWRI" }, \
{ XBF_STALE, "STALE" }, \ { XBF_STALE, "STALE" }, \
{ XBF_SYNCIO, "SYNCIO" }, \ { XBF_SYNCIO, "SYNCIO" }, \
{ XBF_FUA, "FUA" }, \ { XBF_FUA, "FUA" }, \
{ XBF_FLUSH, "FLUSH" }, \ { XBF_FLUSH, "FLUSH" }, \
{ XBF_LOCK, "LOCK" }, /* should never be set */\ { XBF_TRYLOCK, "TRYLOCK" }, /* should never be set */\
{ XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\
{ XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
{ _XBF_PAGES, "PAGES" }, \ { _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \ { _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" } { _XBF_DELWRI_Q, "DELWRI_Q" }
typedef enum {
XBT_FORCE_FLUSH = 0,
} xfs_buftarg_flags_t;
typedef struct xfs_buftarg { typedef struct xfs_buftarg {
dev_t bt_dev; dev_t bt_dev;
struct block_device *bt_bdev; struct block_device *bt_bdev;
...@@ -101,12 +86,6 @@ typedef struct xfs_buftarg { ...@@ -101,12 +86,6 @@ typedef struct xfs_buftarg {
unsigned int bt_sshift; unsigned int bt_sshift;
size_t bt_smask; size_t bt_smask;
/* per device delwri queue */
struct task_struct *bt_task;
struct list_head bt_delwri_queue;
spinlock_t bt_delwri_lock;
unsigned long bt_flags;
/* LRU control structures */ /* LRU control structures */
struct shrinker bt_shrinker; struct shrinker bt_shrinker;
struct list_head bt_lru; struct list_head bt_lru;
...@@ -128,8 +107,8 @@ typedef struct xfs_buf { ...@@ -128,8 +107,8 @@ typedef struct xfs_buf {
* fast-path on locking. * fast-path on locking.
*/ */
struct rb_node b_rbnode; /* rbtree node */ struct rb_node b_rbnode; /* rbtree node */
xfs_off_t b_file_offset; /* offset in file */ xfs_daddr_t b_bn; /* block number for I/O */
size_t b_buffer_length;/* size of buffer in bytes */ int b_length; /* size of buffer in BBs */
atomic_t b_hold; /* reference count */ atomic_t b_hold; /* reference count */
atomic_t b_lru_ref; /* lru reclaim ref count */ atomic_t b_lru_ref; /* lru reclaim ref count */
xfs_buf_flags_t b_flags; /* status flags */ xfs_buf_flags_t b_flags; /* status flags */
...@@ -140,8 +119,6 @@ typedef struct xfs_buf { ...@@ -140,8 +119,6 @@ typedef struct xfs_buf {
struct list_head b_list; struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */ struct xfs_perag *b_pag; /* contains rbtree root */
xfs_buftarg_t *b_target; /* buffer target (device) */ xfs_buftarg_t *b_target; /* buffer target (device) */
xfs_daddr_t b_bn; /* block number for I/O */
size_t b_count_desired;/* desired transfer size */
void *b_addr; /* virtual address of buffer */ void *b_addr; /* virtual address of buffer */
struct work_struct b_iodone_work; struct work_struct b_iodone_work;
xfs_buf_iodone_t b_iodone; /* I/O completion function */ xfs_buf_iodone_t b_iodone; /* I/O completion function */
...@@ -150,7 +127,7 @@ typedef struct xfs_buf { ...@@ -150,7 +127,7 @@ typedef struct xfs_buf {
struct xfs_trans *b_transp; struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */ struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */ struct page *b_page_array[XB_PAGES]; /* inline pages */
unsigned long b_queuetime; /* time buffer was queued */ int b_io_length; /* IO size in BBs */
atomic_t b_pin_count; /* pin count */ atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */ atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */ unsigned int b_page_count; /* size of page array */
...@@ -163,26 +140,30 @@ typedef struct xfs_buf { ...@@ -163,26 +140,30 @@ typedef struct xfs_buf {
/* Finding and Reading Buffers */ /* Finding and Reading Buffers */
extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t, struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno,
xfs_buf_flags_t, xfs_buf_t *); size_t numblks, xfs_buf_flags_t flags,
struct xfs_buf *new_bp);
#define xfs_incore(buftarg,blkno,len,lockit) \ #define xfs_incore(buftarg,blkno,len,lockit) \
_xfs_buf_find(buftarg, blkno ,len, lockit, NULL) _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t, struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno,
xfs_buf_flags_t); size_t numblks, xfs_buf_flags_t flags);
extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t, struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno,
xfs_buf_flags_t); size_t numblks, xfs_buf_flags_t flags);
void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno,
struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *, xfs_off_t, size_t, size_t numblks);
xfs_buf_flags_t);
extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len); struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int); struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); size_t numblks, xfs_buf_flags_t flags);
extern void xfs_buf_hold(xfs_buf_t *); void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t); int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
struct xfs_buftarg *target, struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
xfs_daddr_t daddr, size_t length, int flags); int flags);
struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
xfs_daddr_t daddr, size_t numblks, int flags);
void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */ /* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *); extern void xfs_buf_free(xfs_buf_t *);
...@@ -204,7 +185,7 @@ extern int xfs_bdstrat_cb(struct xfs_buf *); ...@@ -204,7 +185,7 @@ extern int xfs_bdstrat_cb(struct xfs_buf *);
extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern int xfs_buf_iorequest(xfs_buf_t *); extern void xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *); extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t); xfs_buf_rw_t);
...@@ -220,24 +201,22 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp) ...@@ -220,24 +201,22 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp)
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
/* Delayed Write Buffer Routines */ /* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_queue(struct xfs_buf *); extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern void xfs_buf_delwri_dequeue(struct xfs_buf *); extern int xfs_buf_delwri_submit(struct list_head *);
extern void xfs_buf_delwri_promote(struct xfs_buf *); extern int xfs_buf_delwri_submit_nowait(struct list_head *);
/* Buffer Daemon Setup Routines */ /* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void); extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void); extern void xfs_buf_terminate(void);
#define XFS_BUF_ZEROFLAGS(bp) \ #define XFS_BUF_ZEROFLAGS(bp) \
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
void xfs_buf_stale(struct xfs_buf *bp); void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) #define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
...@@ -256,12 +235,6 @@ void xfs_buf_stale(struct xfs_buf *bp); ...@@ -256,12 +235,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_ADDR(bp) ((bp)->b_bn) #define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{ {
...@@ -287,7 +260,6 @@ extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *, ...@@ -287,7 +260,6 @@ extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *); extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev) #define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev) #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -123,11 +122,11 @@ xfs_buf_item_log_check( ...@@ -123,11 +122,11 @@ xfs_buf_item_log_check(
ASSERT(bip->bli_logged != NULL); ASSERT(bip->bli_logged != NULL);
bp = bip->bli_buf; bp = bip->bli_buf;
ASSERT(XFS_BUF_COUNT(bp) > 0); ASSERT(bp->b_length > 0);
ASSERT(bp->b_addr != NULL); ASSERT(bp->b_addr != NULL);
orig = bip->bli_orig; orig = bip->bli_orig;
buffer = bp->b_addr; buffer = bp->b_addr;
for (x = 0; x < XFS_BUF_COUNT(bp); x++) { for (x = 0; x < BBTOB(bp->b_length); x++) {
if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) { if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
xfs_emerg(bp->b_mount, xfs_emerg(bp->b_mount,
"%s: bip %x buffer %x orig %x index %d", "%s: bip %x buffer %x orig %x index %d",
...@@ -418,7 +417,6 @@ xfs_buf_item_unpin( ...@@ -418,7 +417,6 @@ xfs_buf_item_unpin(
if (freed && stale) { if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE); ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(xfs_buf_islocked(bp)); ASSERT(xfs_buf_islocked(bp));
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp)); ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
...@@ -455,42 +453,42 @@ xfs_buf_item_unpin( ...@@ -455,42 +453,42 @@ xfs_buf_item_unpin(
bp->b_iodone = NULL; bp->b_iodone = NULL;
} else { } else {
spin_lock(&ailp->xa_lock); spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp); xfs_buf_item_relse(bp);
ASSERT(bp->b_fspriv == NULL); ASSERT(bp->b_fspriv == NULL);
} }
xfs_buf_relse(bp); xfs_buf_relse(bp);
} else if (freed && remove) {
xfs_buf_lock(bp);
xfs_buf_ioerror(bp, EIO);
XFS_BUF_UNDONE(bp);
xfs_buf_stale(bp);
xfs_buf_ioend(bp, 0);
} }
} }
/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
* the lock right away, return 0. If we can get the lock, take a
* reference to the buffer. If this is a delayed write buffer that
* needs AIL help to be written back, invoke the pushbuf routine
* rather than the normal success path.
*/
STATIC uint STATIC uint
xfs_buf_item_trylock( xfs_buf_item_push(
struct xfs_log_item *lip) struct xfs_log_item *lip,
struct list_head *buffer_list)
{ {
struct xfs_buf_log_item *bip = BUF_ITEM(lip); struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf; struct xfs_buf *bp = bip->bli_buf;
uint rval = XFS_ITEM_SUCCESS;
if (xfs_buf_ispinned(bp)) if (xfs_buf_ispinned(bp))
return XFS_ITEM_PINNED; return XFS_ITEM_PINNED;
if (!xfs_buf_trylock(bp)) if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED; return XFS_ITEM_LOCKED;
/* take a reference to the buffer. */
xfs_buf_hold(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_trylock(bip);
if (XFS_BUF_ISDELAYWRITE(bp)) trace_xfs_buf_item_push(bip);
return XFS_ITEM_PUSHBUF;
return XFS_ITEM_SUCCESS; if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
return rval;
} }
/* /*
...@@ -603,49 +601,6 @@ xfs_buf_item_committed( ...@@ -603,49 +601,6 @@ xfs_buf_item_committed(
return lsn; return lsn;
} }
/*
* The buffer is locked, but is not a delayed write buffer. This happens
* if we race with IO completion and hence we don't want to try to write it
* again. Just release the buffer.
*/
STATIC void
xfs_buf_item_push(
struct xfs_log_item *lip)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
trace_xfs_buf_item_push(bip);
xfs_buf_relse(bp);
}
/*
* The buffer is locked and is a delayed write buffer. Promote the buffer
* in the delayed write queue as the caller knows that they must invoke
* the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too.
*/
STATIC bool
xfs_buf_item_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(XFS_BUF_ISDELAYWRITE(bp));
trace_xfs_buf_item_pushbuf(bip);
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
return true;
}
STATIC void STATIC void
xfs_buf_item_committing( xfs_buf_item_committing(
struct xfs_log_item *lip, struct xfs_log_item *lip,
...@@ -661,11 +616,9 @@ static const struct xfs_item_ops xfs_buf_item_ops = { ...@@ -661,11 +616,9 @@ static const struct xfs_item_ops xfs_buf_item_ops = {
.iop_format = xfs_buf_item_format, .iop_format = xfs_buf_item_format,
.iop_pin = xfs_buf_item_pin, .iop_pin = xfs_buf_item_pin,
.iop_unpin = xfs_buf_item_unpin, .iop_unpin = xfs_buf_item_unpin,
.iop_trylock = xfs_buf_item_trylock,
.iop_unlock = xfs_buf_item_unlock, .iop_unlock = xfs_buf_item_unlock,
.iop_committed = xfs_buf_item_committed, .iop_committed = xfs_buf_item_committed,
.iop_push = xfs_buf_item_push, .iop_push = xfs_buf_item_push,
.iop_pushbuf = xfs_buf_item_pushbuf,
.iop_committing = xfs_buf_item_committing .iop_committing = xfs_buf_item_committing
}; };
...@@ -703,7 +656,8 @@ xfs_buf_item_init( ...@@ -703,7 +656,8 @@ xfs_buf_item_init(
* truncate any pieces. map_size is the size of the * truncate any pieces. map_size is the size of the
* bitmap needed to describe the chunks of the buffer. * bitmap needed to describe the chunks of the buffer.
*/ */
chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT); chunks = (int)((BBTOB(bp->b_length) + (XFS_BLF_CHUNK - 1)) >>
XFS_BLF_SHIFT);
map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT); map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone, bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
...@@ -713,7 +667,7 @@ xfs_buf_item_init( ...@@ -713,7 +667,7 @@ xfs_buf_item_init(
xfs_buf_hold(bp); xfs_buf_hold(bp);
bip->bli_format.blf_type = XFS_LI_BUF; bip->bli_format.blf_type = XFS_LI_BUF;
bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); bip->bli_format.blf_len = (ushort)bp->b_length;
bip->bli_format.blf_map_size = map_size; bip->bli_format.blf_map_size = map_size;
#ifdef XFS_TRANS_DEBUG #ifdef XFS_TRANS_DEBUG
...@@ -725,9 +679,9 @@ xfs_buf_item_init( ...@@ -725,9 +679,9 @@ xfs_buf_item_init(
* the buffer to indicate which bytes the callers have asked * the buffer to indicate which bytes the callers have asked
* to have logged. * to have logged.
*/ */
bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP); bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp)); memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP); bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif #endif
/* /*
...@@ -984,20 +938,27 @@ xfs_buf_iodone_callbacks( ...@@ -984,20 +938,27 @@ xfs_buf_iodone_callbacks(
* If the write was asynchronous then no one will be looking for the * If the write was asynchronous then no one will be looking for the
* error. Clear the error state and write the buffer out again. * error. Clear the error state and write the buffer out again.
* *
* During sync or umount we'll write all pending buffers again * XXX: This helps against transient write errors, but we need to find
* synchronous, which will catch these errors if they keep hanging * a way to shut the filesystem down if the writes keep failing.
* around. *
* In practice we'll shut the filesystem down soon as non-transient
* erorrs tend to affect the whole device and a failing log write
* will make us give up. But we really ought to do better here.
*/ */
if (XFS_BUF_ISASYNC(bp)) { if (XFS_BUF_ISASYNC(bp)) {
ASSERT(bp->b_iodone != NULL);
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
if (!XFS_BUF_ISSTALE(bp)) { if (!XFS_BUF_ISSTALE(bp)) {
xfs_buf_delwri_queue(bp); bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
XFS_BUF_DONE(bp); xfs_bdstrat_cb(bp);
} else {
xfs_buf_relse(bp);
} }
ASSERT(bp->b_iodone != NULL);
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
xfs_buf_relse(bp);
return; return;
} }
...@@ -1045,6 +1006,6 @@ xfs_buf_iodone( ...@@ -1045,6 +1006,6 @@ xfs_buf_iodone(
* Either way, AIL is useless if we're forcing a shutdown. * Either way, AIL is useless if we're forcing a shutdown.
*/ */
spin_lock(&ailp->xa_lock); spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, lip); xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
xfs_buf_item_free(BUF_ITEM(lip)); xfs_buf_item_free(BUF_ITEM(lip));
} }
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -2277,20 +2276,20 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps) ...@@ -2277,20 +2276,20 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
if (nbuf == 1) { if (nbuf == 1) {
dabuf->nbuf = 1; dabuf->nbuf = 1;
bp = bps[0]; bp = bps[0];
dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp)); dabuf->bbcount = bp->b_length;
dabuf->data = bp->b_addr; dabuf->data = bp->b_addr;
dabuf->bps[0] = bp; dabuf->bps[0] = bp;
} else { } else {
dabuf->nbuf = nbuf; dabuf->nbuf = nbuf;
for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) { for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
dabuf->bps[i] = bp = bps[i]; dabuf->bps[i] = bp = bps[i];
dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp)); dabuf->bbcount += bp->b_length;
} }
dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP); dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) { for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
bp = bps[i]; bp = bps[i];
memcpy((char *)dabuf->data + off, bp->b_addr, memcpy((char *)dabuf->data + off, bp->b_addr,
XFS_BUF_COUNT(bp)); BBTOB(bp->b_length));
} }
} }
return dabuf; return dabuf;
...@@ -2310,10 +2309,10 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf) ...@@ -2310,10 +2309,10 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf)
ASSERT(dabuf->nbuf > 1); ASSERT(dabuf->nbuf > 1);
dabuf->dirty = 0; dabuf->dirty = 0;
for (i = off = 0; i < dabuf->nbuf; for (i = off = 0; i < dabuf->nbuf;
i++, off += XFS_BUF_COUNT(bp)) { i++, off += BBTOB(bp->b_length)) {
bp = dabuf->bps[i]; bp = dabuf->bps[i];
memcpy(bp->b_addr, dabuf->data + off, memcpy(bp->b_addr, dabuf->data + off,
XFS_BUF_COUNT(bp)); BBTOB(bp->b_length));
} }
} }
} }
...@@ -2356,10 +2355,10 @@ xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last) ...@@ -2356,10 +2355,10 @@ xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
} }
dabuf->dirty = 1; dabuf->dirty = 1;
ASSERT(first <= last); ASSERT(first <= last);
for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) { for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
bp = dabuf->bps[i]; bp = dabuf->bps[i];
f = off; f = off;
l = f + XFS_BUF_COUNT(bp) - 1; l = f + BBTOB(bp->b_length) - 1;
if (f < first) if (f < first)
f = first; f = first;
if (l > last) if (l > last)
......
...@@ -18,9 +18,7 @@ ...@@ -18,9 +18,7 @@
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h" #include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
*/ */
#include "xfs.h" #include "xfs.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_ag.h" #include "xfs_ag.h"
#include "xfs_mount.h" #include "xfs_mount.h"
...@@ -30,6 +29,7 @@ ...@@ -30,6 +29,7 @@
#include "xfs_inode.h" #include "xfs_inode.h"
#include "xfs_alloc.h" #include "xfs_alloc.h"
#include "xfs_error.h" #include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h" #include "xfs_discard.h"
#include "xfs_trace.h" #include "xfs_trace.h"
...@@ -118,7 +118,7 @@ xfs_trim_extents( ...@@ -118,7 +118,7 @@ xfs_trim_extents(
* If any blocks in the range are still busy, skip the * If any blocks in the range are still busy, skip the
* discard and try again the next time. * discard and try again the next time.
*/ */
if (xfs_alloc_busy_search(mp, agno, fbno, flen)) { if (xfs_extent_busy_search(mp, agno, fbno, flen)) {
trace_xfs_discard_busy(mp, agno, fbno, flen); trace_xfs_discard_busy(mp, agno, fbno, flen);
goto next_extent; goto next_extent;
} }
...@@ -212,7 +212,7 @@ xfs_discard_extents( ...@@ -212,7 +212,7 @@ xfs_discard_extents(
struct xfs_mount *mp, struct xfs_mount *mp,
struct list_head *list) struct list_head *list)
{ {
struct xfs_busy_extent *busyp; struct xfs_extent_busy *busyp;
int error = 0; int error = 0;
list_for_each_entry(busyp, list, list) { list_for_each_entry(busyp, list, list) {
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -857,7 +856,7 @@ xfs_qm_dqflush_done( ...@@ -857,7 +856,7 @@ xfs_qm_dqflush_done(
/* xfs_trans_ail_delete() drops the AIL lock. */ /* xfs_trans_ail_delete() drops the AIL lock. */
spin_lock(&ailp->xa_lock); spin_lock(&ailp->xa_lock);
if (lip->li_lsn == qip->qli_flush_lsn) if (lip->li_lsn == qip->qli_flush_lsn)
xfs_trans_ail_delete(ailp, lip); xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
else else
spin_unlock(&ailp->xa_lock); spin_unlock(&ailp->xa_lock);
} }
...@@ -878,8 +877,8 @@ xfs_qm_dqflush_done( ...@@ -878,8 +877,8 @@ xfs_qm_dqflush_done(
*/ */
int int
xfs_qm_dqflush( xfs_qm_dqflush(
xfs_dquot_t *dqp, struct xfs_dquot *dqp,
uint flags) struct xfs_buf **bpp)
{ {
struct xfs_mount *mp = dqp->q_mount; struct xfs_mount *mp = dqp->q_mount;
struct xfs_buf *bp; struct xfs_buf *bp;
...@@ -891,25 +890,30 @@ xfs_qm_dqflush( ...@@ -891,25 +890,30 @@ xfs_qm_dqflush(
trace_xfs_dqflush(dqp); trace_xfs_dqflush(dqp);
/* *bpp = NULL;
* If not dirty, or it's pinned and we are not supposed to block, nada.
*/
if (!XFS_DQ_IS_DIRTY(dqp) ||
((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
xfs_dqfunlock(dqp);
return 0;
}
xfs_qm_dqunpin_wait(dqp); xfs_qm_dqunpin_wait(dqp);
/* /*
* This may have been unpinned because the filesystem is shutting * This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this dquot * down forcibly. If that's the case we must not write this dquot
* to disk, because the log record didn't make it to disk! * to disk, because the log record didn't make it to disk.
*
* We also have to remove the log item from the AIL in this case,
* as we wait for an emptry AIL as part of the unmount process.
*/ */
if (XFS_FORCED_SHUTDOWN(mp)) { if (XFS_FORCED_SHUTDOWN(mp)) {
struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
dqp->dq_flags &= ~XFS_DQ_DIRTY; dqp->dq_flags &= ~XFS_DQ_DIRTY;
xfs_dqfunlock(dqp);
return XFS_ERROR(EIO); spin_lock(&mp->m_ail->xa_lock);
if (lip->li_flags & XFS_LI_IN_AIL)
xfs_trans_ail_delete(mp->m_ail, lip,
SHUTDOWN_CORRUPT_INCORE);
else
spin_unlock(&mp->m_ail->xa_lock);
error = XFS_ERROR(EIO);
goto out_unlock;
} }
/* /*
...@@ -917,11 +921,8 @@ xfs_qm_dqflush( ...@@ -917,11 +921,8 @@ xfs_qm_dqflush(
*/ */
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0, &bp); mp->m_quotainfo->qi_dqchunklen, 0, &bp);
if (error) { if (error)
ASSERT(error != ENOENT); goto out_unlock;
xfs_dqfunlock(dqp);
return error;
}
/* /*
* Calculate the location of the dquot inside the buffer. * Calculate the location of the dquot inside the buffer.
...@@ -967,20 +968,13 @@ xfs_qm_dqflush( ...@@ -967,20 +968,13 @@ xfs_qm_dqflush(
xfs_log_force(mp, 0); xfs_log_force(mp, 0);
} }
if (flags & SYNC_WAIT)
error = xfs_bwrite(bp);
else
xfs_buf_delwri_queue(bp);
xfs_buf_relse(bp);
trace_xfs_dqflush_done(dqp); trace_xfs_dqflush_done(dqp);
*bpp = bp;
return 0;
/* out_unlock:
* dqp is still locked, but caller is free to unlock it now. xfs_dqfunlock(dqp);
*/ return XFS_ERROR(EIO);
return error;
} }
/* /*
...@@ -1011,39 +1005,6 @@ xfs_dqlock2( ...@@ -1011,39 +1005,6 @@ xfs_dqlock2(
} }
} }
/*
* Give the buffer a little push if it is incore and
* wait on the flush lock.
*/
void
xfs_dqflock_pushbuf_wait(
xfs_dquot_t *dqp)
{
xfs_mount_t *mp = dqp->q_mount;
xfs_buf_t *bp;
/*
* Check to see if the dquot has been flushed delayed
* write. If so, grab its buffer and send it
* out immediately. We'll be able to acquire
* the flush lock when the I/O completes.
*/
bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
if (!bp)
goto out_lock;
if (XFS_BUF_ISDELAYWRITE(bp)) {
if (xfs_buf_ispinned(bp))
xfs_log_force(mp, 0);
xfs_buf_delwri_promote(bp);
wake_up_process(bp->b_target->bt_task);
}
xfs_buf_relse(bp);
out_lock:
xfs_dqflock(dqp);
}
int __init int __init
xfs_qm_init(void) xfs_qm_init(void)
{ {
......
...@@ -141,7 +141,7 @@ static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type) ...@@ -141,7 +141,7 @@ static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type)
extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint,
uint, struct xfs_dquot **); uint, struct xfs_dquot **);
extern void xfs_qm_dqdestroy(xfs_dquot_t *); extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(xfs_dquot_t *, uint); extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **);
extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *); xfs_disk_dquot_t *);
...@@ -152,7 +152,6 @@ extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, ...@@ -152,7 +152,6 @@ extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
extern void xfs_qm_dqput(xfs_dquot_t *); extern void xfs_qm_dqput(xfs_dquot_t *);
extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *); extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
extern void xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp);
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp) static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{ {
......
...@@ -17,9 +17,7 @@ ...@@ -17,9 +17,7 @@
*/ */
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
...@@ -108,38 +106,6 @@ xfs_qm_dquot_logitem_unpin( ...@@ -108,38 +106,6 @@ xfs_qm_dquot_logitem_unpin(
wake_up(&dqp->q_pinwait); wake_up(&dqp->q_pinwait);
} }
/*
* Given the logitem, this writes the corresponding dquot entry to disk
* asynchronously. This is called with the dquot entry securely locked;
* we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
* at the end.
*/
STATIC void
xfs_qm_dquot_logitem_push(
struct xfs_log_item *lip)
{
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
/*
* Since we were able to lock the dquot's flush lock and
* we found it on the AIL, the dquot must be dirty. This
* is because the dquot is removed from the AIL while still
* holding the flush lock in xfs_dqflush_done(). Thus, if
* we found it in the AIL and were able to obtain the flush
* lock without sleeping, then there must not have been
* anyone in the process of flushing the dquot.
*/
error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
if (error)
xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
__func__, error, dqp);
xfs_dqunlock(dqp);
}
STATIC xfs_lsn_t STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed( xfs_qm_dquot_logitem_committed(
struct xfs_log_item *lip, struct xfs_log_item *lip,
...@@ -171,67 +137,15 @@ xfs_qm_dqunpin_wait( ...@@ -171,67 +137,15 @@ xfs_qm_dqunpin_wait(
wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
} }
/*
* This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
* the dquot is locked by us, but the flush lock isn't. So, here we are
* going to see if the relevant dquot buffer is incore, waiting on DELWRI.
* If so, we want to push it out to help us take this item off the AIL as soon
* as possible.
*
* We must not be holding the AIL lock at this point. Calling incore() to
* search the buffer cache can be a time consuming thing, and AIL lock is a
* spinlock.
*/
STATIC bool
xfs_qm_dquot_logitem_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_buf *bp;
bool ret = true;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
* If flushlock isn't locked anymore, chances are that the
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
if (completion_done(&dqp->q_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp);
return true;
}
bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
if (xfs_buf_ispinned(bp))
ret = false;
xfs_buf_relse(bp);
return ret;
}
/*
* This is called to attempt to lock the dquot associated with this
* dquot log item. Don't sleep on the dquot lock or the flush lock.
* If the flush lock is already held, indicating that the dquot has
* been or is in the process of being flushed, then see if we can
* find the dquot's buffer in the buffer cache without sleeping. If
* we can and it is marked delayed write, then we want to send it out.
* We delay doing so until the push routine, though, to avoid sleeping
* in any device strategy routines.
*/
STATIC uint STATIC uint
xfs_qm_dquot_logitem_trylock( xfs_qm_dquot_logitem_push(
struct xfs_log_item *lip) struct xfs_log_item *lip,
struct list_head *buffer_list)
{ {
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
struct xfs_buf *bp = NULL;
uint rval = XFS_ITEM_SUCCESS;
int error;
if (atomic_read(&dqp->q_pincount) > 0) if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED; return XFS_ITEM_PINNED;
...@@ -239,16 +153,41 @@ xfs_qm_dquot_logitem_trylock( ...@@ -239,16 +153,41 @@ xfs_qm_dquot_logitem_trylock(
if (!xfs_dqlock_nowait(dqp)) if (!xfs_dqlock_nowait(dqp))
return XFS_ITEM_LOCKED; return XFS_ITEM_LOCKED;
/*
* Re-check the pincount now that we stabilized the value by
* taking the quota lock.
*/
if (atomic_read(&dqp->q_pincount) > 0) {
rval = XFS_ITEM_PINNED;
goto out_unlock;
}
/*
* Someone else is already flushing the dquot. Nothing we can do
* here but wait for the flush to finish and remove the item from
* the AIL.
*/
if (!xfs_dqflock_nowait(dqp)) { if (!xfs_dqflock_nowait(dqp)) {
/* rval = XFS_ITEM_FLUSHING;
* dquot has already been flushed to the backing buffer, goto out_unlock;
* leave it locked, pushbuf routine will unlock it.
*/
return XFS_ITEM_PUSHBUF;
} }
ASSERT(lip->li_flags & XFS_LI_IN_AIL); spin_unlock(&lip->li_ailp->xa_lock);
return XFS_ITEM_SUCCESS;
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
__func__, error, dqp);
} else {
if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_relse(bp);
}
spin_lock(&lip->li_ailp->xa_lock);
out_unlock:
xfs_dqunlock(dqp);
return rval;
} }
/* /*
...@@ -299,11 +238,9 @@ static const struct xfs_item_ops xfs_dquot_item_ops = { ...@@ -299,11 +238,9 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
.iop_format = xfs_qm_dquot_logitem_format, .iop_format = xfs_qm_dquot_logitem_format,
.iop_pin = xfs_qm_dquot_logitem_pin, .iop_pin = xfs_qm_dquot_logitem_pin,
.iop_unpin = xfs_qm_dquot_logitem_unpin, .iop_unpin = xfs_qm_dquot_logitem_unpin,
.iop_trylock = xfs_qm_dquot_logitem_trylock,
.iop_unlock = xfs_qm_dquot_logitem_unlock, .iop_unlock = xfs_qm_dquot_logitem_unlock,
.iop_committed = xfs_qm_dquot_logitem_committed, .iop_committed = xfs_qm_dquot_logitem_committed,
.iop_push = xfs_qm_dquot_logitem_push, .iop_push = xfs_qm_dquot_logitem_push,
.iop_pushbuf = xfs_qm_dquot_logitem_pushbuf,
.iop_committing = xfs_qm_dquot_logitem_committing .iop_committing = xfs_qm_dquot_logitem_committing
}; };
...@@ -398,11 +335,13 @@ xfs_qm_qoff_logitem_unpin( ...@@ -398,11 +335,13 @@ xfs_qm_qoff_logitem_unpin(
} }
/* /*
* Quotaoff items have no locking, so just return success. * There isn't much you can do to push a quotaoff item. It is simply
* stuck waiting for the log to be flushed to disk.
*/ */
STATIC uint STATIC uint
xfs_qm_qoff_logitem_trylock( xfs_qm_qoff_logitem_push(
struct xfs_log_item *lip) struct xfs_log_item *lip,
struct list_head *buffer_list)
{ {
return XFS_ITEM_LOCKED; return XFS_ITEM_LOCKED;
} }
...@@ -429,17 +368,6 @@ xfs_qm_qoff_logitem_committed( ...@@ -429,17 +368,6 @@ xfs_qm_qoff_logitem_committed(
return lsn; return lsn;
} }
/*
* There isn't much you can do to push on an quotaoff item. It is simply
* stuck waiting for the log to be flushed to disk.
*/
STATIC void
xfs_qm_qoff_logitem_push(
struct xfs_log_item *lip)
{
}
STATIC xfs_lsn_t STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed( xfs_qm_qoffend_logitem_committed(
struct xfs_log_item *lip, struct xfs_log_item *lip,
...@@ -454,7 +382,7 @@ xfs_qm_qoffend_logitem_committed( ...@@ -454,7 +382,7 @@ xfs_qm_qoffend_logitem_committed(
* xfs_trans_ail_delete() drops the AIL lock. * xfs_trans_ail_delete() drops the AIL lock.
*/ */
spin_lock(&ailp->xa_lock); spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs); xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
kmem_free(qfs); kmem_free(qfs);
kmem_free(qfe); kmem_free(qfe);
...@@ -487,7 +415,6 @@ static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { ...@@ -487,7 +415,6 @@ static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_format = xfs_qm_qoff_logitem_format, .iop_format = xfs_qm_qoff_logitem_format,
.iop_pin = xfs_qm_qoff_logitem_pin, .iop_pin = xfs_qm_qoff_logitem_pin,
.iop_unpin = xfs_qm_qoff_logitem_unpin, .iop_unpin = xfs_qm_qoff_logitem_unpin,
.iop_trylock = xfs_qm_qoff_logitem_trylock,
.iop_unlock = xfs_qm_qoff_logitem_unlock, .iop_unlock = xfs_qm_qoff_logitem_unlock,
.iop_committed = xfs_qm_qoffend_logitem_committed, .iop_committed = xfs_qm_qoffend_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push, .iop_push = xfs_qm_qoff_logitem_push,
...@@ -502,7 +429,6 @@ static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = { ...@@ -502,7 +429,6 @@ static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_format = xfs_qm_qoff_logitem_format, .iop_format = xfs_qm_qoff_logitem_format,
.iop_pin = xfs_qm_qoff_logitem_pin, .iop_pin = xfs_qm_qoff_logitem_pin,
.iop_unpin = xfs_qm_qoff_logitem_unpin, .iop_unpin = xfs_qm_qoff_logitem_unpin,
.iop_trylock = xfs_qm_qoff_logitem_trylock,
.iop_unlock = xfs_qm_qoff_logitem_unlock, .iop_unlock = xfs_qm_qoff_logitem_unlock,
.iop_committed = xfs_qm_qoff_logitem_committed, .iop_committed = xfs_qm_qoff_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push, .iop_push = xfs_qm_qoff_logitem_push,
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
*/ */
#include "xfs.h" #include "xfs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
......
This diff is collapsed.
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* Copyright (c) 2010 David Chinner.
* Copyright (c) 2011 Christoph Hellwig.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_EXTENT_BUSY_H__
#define __XFS_EXTENT_BUSY_H__

/*
 * Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
 * have been freed but whose transactions aren't committed to disk yet.
 *
 * Note that we use the transaction ID to record the transaction, not the
 * transaction structure itself. See xfs_extent_busy_insert() for details.
 */
struct xfs_extent_busy {
	struct rb_node	rb_node;	/* ag by-bno indexed search tree */
	struct list_head	list;	/* transaction busy extent list */
	xfs_agnumber_t	agno;		/* allocation group of the extent */
	xfs_agblock_t	bno;		/* first block of the busy extent */
	xfs_extlen_t	length;		/* length of the extent in blocks */
	unsigned int	flags;
#define XFS_EXTENT_BUSY_DISCARDED	0x01	/* undergoing a discard op. */
#define XFS_EXTENT_BUSY_SKIP_DISCARD	0x02	/* do not discard */
};

/* Record a freed extent as busy in the per-AG tree and on the transaction. */
void
xfs_extent_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
	xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);

/* Remove a list of busy extents, optionally discarding them first. */
void
xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
	bool do_discard);

/* Check whether the given range overlaps any busy extent. */
int
xfs_extent_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
	xfs_agblock_t bno, xfs_extlen_t len);

/* Make a busy range safe for reuse (e.g. force out pending transactions). */
void
xfs_extent_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
	xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);

/* Trim an allocation candidate so it does not overlap busy extents. */
void
xfs_extent_busy_trim(struct xfs_alloc_arg *args, xfs_agblock_t bno,
	xfs_extlen_t len, xfs_agblock_t *rbno, xfs_extlen_t *rlen);

/* list_sort() comparator ordering busy extents (used by the helper below). */
int
xfs_extent_busy_ag_cmp(void *priv, struct list_head *a, struct list_head *b);

/* Sort a list of busy extents using the comparator above. */
static inline void xfs_extent_busy_sort(struct list_head *list)
{
	list_sort(NULL, list, xfs_extent_busy_ag_cmp);
}

#endif	/* __XFS_EXTENT_BUSY_H__ */
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_buf_item.h" #include "xfs_buf_item.h"
#include "xfs_sb.h" #include "xfs_sb.h"
...@@ -64,7 +63,8 @@ __xfs_efi_release( ...@@ -64,7 +63,8 @@ __xfs_efi_release(
if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) { if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
spin_lock(&ailp->xa_lock); spin_lock(&ailp->xa_lock);
/* xfs_trans_ail_delete() drops the AIL lock. */ /* xfs_trans_ail_delete() drops the AIL lock. */
xfs_trans_ail_delete(ailp, &efip->efi_item); xfs_trans_ail_delete(ailp, &efip->efi_item,
SHUTDOWN_LOG_IO_ERROR);
xfs_efi_item_free(efip); xfs_efi_item_free(efip);
} }
} }
...@@ -147,22 +147,20 @@ xfs_efi_item_unpin( ...@@ -147,22 +147,20 @@ xfs_efi_item_unpin(
} }
/* /*
* Efi items have no locking or pushing. However, since EFIs are * Efi items have no locking or pushing. However, since EFIs are pulled from
* pulled from the AIL when their corresponding EFDs are committed * the AIL when their corresponding EFDs are committed to disk, their situation
* to disk, their situation is very similar to being pinned. Return * is very similar to being pinned. Return XFS_ITEM_PINNED so that the caller
* XFS_ITEM_PINNED so that the caller will eventually flush the log. * will eventually flush the log. This should help in getting the EFI out of
* This should help in getting the EFI out of the AIL. * the AIL.
*/ */
STATIC uint STATIC uint
xfs_efi_item_trylock( xfs_efi_item_push(
struct xfs_log_item *lip) struct xfs_log_item *lip,
struct list_head *buffer_list)
{ {
return XFS_ITEM_PINNED; return XFS_ITEM_PINNED;
} }
/*
* Efi items have no locking, so just return.
*/
STATIC void STATIC void
xfs_efi_item_unlock( xfs_efi_item_unlock(
struct xfs_log_item *lip) struct xfs_log_item *lip)
...@@ -189,17 +187,6 @@ xfs_efi_item_committed( ...@@ -189,17 +187,6 @@ xfs_efi_item_committed(
return lsn; return lsn;
} }
/*
* There isn't much you can do to push on an efi item. It is simply
* stuck waiting for all of its corresponding efd items to be
* committed to disk.
*/
STATIC void
xfs_efi_item_push(
struct xfs_log_item *lip)
{
}
/* /*
* The EFI dependency tracking op doesn't do squat. It can't because * The EFI dependency tracking op doesn't do squat. It can't because
* it doesn't know where the free extent is coming from. The dependency * it doesn't know where the free extent is coming from. The dependency
...@@ -222,7 +209,6 @@ static const struct xfs_item_ops xfs_efi_item_ops = { ...@@ -222,7 +209,6 @@ static const struct xfs_item_ops xfs_efi_item_ops = {
.iop_format = xfs_efi_item_format, .iop_format = xfs_efi_item_format,
.iop_pin = xfs_efi_item_pin, .iop_pin = xfs_efi_item_pin,
.iop_unpin = xfs_efi_item_unpin, .iop_unpin = xfs_efi_item_unpin,
.iop_trylock = xfs_efi_item_trylock,
.iop_unlock = xfs_efi_item_unlock, .iop_unlock = xfs_efi_item_unlock,
.iop_committed = xfs_efi_item_committed, .iop_committed = xfs_efi_item_committed,
.iop_push = xfs_efi_item_push, .iop_push = xfs_efi_item_push,
...@@ -404,19 +390,17 @@ xfs_efd_item_unpin( ...@@ -404,19 +390,17 @@ xfs_efd_item_unpin(
} }
/* /*
* Efd items have no locking, so just return success. * There isn't much you can do to push on an efd item. It is simply stuck
* waiting for the log to be flushed to disk.
*/ */
STATIC uint STATIC uint
xfs_efd_item_trylock( xfs_efd_item_push(
struct xfs_log_item *lip) struct xfs_log_item *lip,
struct list_head *buffer_list)
{ {
return XFS_ITEM_LOCKED; return XFS_ITEM_PINNED;
} }
/*
* Efd items have no locking or pushing, so return failure
* so that the caller doesn't bother with us.
*/
STATIC void STATIC void
xfs_efd_item_unlock( xfs_efd_item_unlock(
struct xfs_log_item *lip) struct xfs_log_item *lip)
...@@ -450,16 +434,6 @@ xfs_efd_item_committed( ...@@ -450,16 +434,6 @@ xfs_efd_item_committed(
return (xfs_lsn_t)-1; return (xfs_lsn_t)-1;
} }
/*
* There isn't much you can do to push on an efd item. It is simply
* stuck waiting for the log to be flushed to disk.
*/
STATIC void
xfs_efd_item_push(
struct xfs_log_item *lip)
{
}
/* /*
* The EFD dependency tracking op doesn't do squat. It can't because * The EFD dependency tracking op doesn't do squat. It can't because
* it doesn't know where the free extent is coming from. The dependency * it doesn't know where the free extent is coming from. The dependency
...@@ -482,7 +456,6 @@ static const struct xfs_item_ops xfs_efd_item_ops = { ...@@ -482,7 +456,6 @@ static const struct xfs_item_ops xfs_efd_item_ops = {
.iop_format = xfs_efd_item_format, .iop_format = xfs_efd_item_format,
.iop_pin = xfs_efd_item_pin, .iop_pin = xfs_efd_item_pin,
.iop_unpin = xfs_efd_item_unpin, .iop_unpin = xfs_efd_item_unpin,
.iop_trylock = xfs_efd_item_trylock,
.iop_unlock = xfs_efd_item_unlock, .iop_unlock = xfs_efd_item_unlock,
.iop_committed = xfs_efd_item_committed, .iop_committed = xfs_efd_item_committed,
.iop_push = xfs_efd_item_push, .iop_push = xfs_efd_item_push,
......
This diff is collapsed.
...@@ -18,8 +18,6 @@ ...@@ -18,8 +18,6 @@
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
...@@ -39,7 +37,6 @@ ...@@ -39,7 +37,6 @@
#include "xfs_itable.h" #include "xfs_itable.h"
#include "xfs_trans_space.h" #include "xfs_trans_space.h"
#include "xfs_rtalloc.h" #include "xfs_rtalloc.h"
#include "xfs_rw.h"
#include "xfs_filestream.h" #include "xfs_filestream.h"
#include "xfs_trace.h" #include "xfs_trace.h"
...@@ -147,9 +144,9 @@ xfs_growfs_data_private( ...@@ -147,9 +144,9 @@ xfs_growfs_data_private(
if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
return error; return error;
dpct = pct - mp->m_sb.sb_imax_pct; dpct = pct - mp->m_sb.sb_imax_pct;
bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, bp = xfs_buf_read_uncached(mp->m_ddev_targp,
XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
BBTOB(XFS_FSS_TO_BB(mp, 1)), 0); XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) if (!bp)
return EIO; return EIO;
xfs_buf_relse(bp); xfs_buf_relse(bp);
...@@ -193,7 +190,7 @@ xfs_growfs_data_private( ...@@ -193,7 +190,7 @@ xfs_growfs_data_private(
*/ */
bp = xfs_buf_get(mp->m_ddev_targp, bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED); XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) { if (!bp) {
error = ENOMEM; error = ENOMEM;
goto error0; goto error0;
...@@ -230,7 +227,7 @@ xfs_growfs_data_private( ...@@ -230,7 +227,7 @@ xfs_growfs_data_private(
*/ */
bp = xfs_buf_get(mp->m_ddev_targp, bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED); XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) { if (!bp) {
error = ENOMEM; error = ENOMEM;
goto error0; goto error0;
...@@ -259,8 +256,7 @@ xfs_growfs_data_private( ...@@ -259,8 +256,7 @@ xfs_growfs_data_private(
*/ */
bp = xfs_buf_get(mp->m_ddev_targp, bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
BTOBB(mp->m_sb.sb_blocksize), BTOBB(mp->m_sb.sb_blocksize), 0);
XBF_LOCK | XBF_MAPPED);
if (!bp) { if (!bp) {
error = ENOMEM; error = ENOMEM;
goto error0; goto error0;
...@@ -286,8 +282,7 @@ xfs_growfs_data_private( ...@@ -286,8 +282,7 @@ xfs_growfs_data_private(
*/ */
bp = xfs_buf_get(mp->m_ddev_targp, bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
BTOBB(mp->m_sb.sb_blocksize), BTOBB(mp->m_sb.sb_blocksize), 0);
XBF_LOCK | XBF_MAPPED);
if (!bp) { if (!bp) {
error = ENOMEM; error = ENOMEM;
goto error0; goto error0;
...@@ -314,8 +309,7 @@ xfs_growfs_data_private( ...@@ -314,8 +309,7 @@ xfs_growfs_data_private(
*/ */
bp = xfs_buf_get(mp->m_ddev_targp, bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
BTOBB(mp->m_sb.sb_blocksize), BTOBB(mp->m_sb.sb_blocksize), 0);
XBF_LOCK | XBF_MAPPED);
if (!bp) { if (!bp) {
error = ENOMEM; error = ENOMEM;
goto error0; goto error0;
...@@ -405,7 +399,7 @@ xfs_growfs_data_private( ...@@ -405,7 +399,7 @@ xfs_growfs_data_private(
/* update secondary superblocks. */ /* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) { for (agno = 1; agno < nagcount; agno++) {
error = xfs_read_buf(mp, mp->m_ddev_targp, error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp); XFS_FSS_TO_BB(mp, 1), 0, &bp);
if (error) { if (error) {
...@@ -693,3 +687,63 @@ xfs_fs_goingdown( ...@@ -693,3 +687,63 @@ xfs_fs_goingdown(
return 0; return 0;
} }
/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 *
 * @mp:    mount to shut down
 * @flags: SHUTDOWN_* reason flags
 * @fname: source file of the caller (for the log message)
 * @lnnum: source line of the caller (for the log message)
 */
void
xfs_do_force_shutdown(
	xfs_mount_t	*mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	int		log_error = flags & SHUTDOWN_LOG_IO_ERROR;
	int		forced = flags & SHUTDOWN_FORCE_UMOUNT;

	/* Record where the shutdown was requested from, unless forced. */
	if (!forced)
		xfs_notice(mp,
	"%s(0x%x) called from line %d of file %s. Return address = 0x%p",
			__func__, flags, lnnum, fname, __return_address);

	/* Already shut down and no new log I/O error: nothing more to do. */
	if (XFS_FORCED_SHUTDOWN(mp) && !log_error)
		return;

	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't queue up
	 * anybody new on the log reservations, and wakes up everybody who's
	 * sleeping on log reservations to tell them the bad news.  A nonzero
	 * return means the shutdown was already in effect.
	 */
	if (xfs_log_force_umount(mp, log_error))
		return;

	/* Tell the administrator why we went down. */
	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
"Corruption of in-memory data detected. Shutting down filesystem");
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (!forced) {
		if (log_error)
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Log I/O Error Detected. Shutting down filesystem");
		else if (flags & SHUTDOWN_DEVICE_REQ)
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
"All device paths lost. Shutting down filesystem");
		else if (!(flags & SHUTDOWN_REMOTE_REQ))
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
"I/O Error Detected. Shutting down filesystem");
	}

	if (!forced)
		xfs_alert(mp,
"Please umount the filesystem and rectify the problem(s)");
}
...@@ -200,8 +200,7 @@ xfs_ialloc_inode_init( ...@@ -200,8 +200,7 @@ xfs_ialloc_inode_init(
*/ */
d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster)); d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
mp->m_bsize * blks_per_cluster, mp->m_bsize * blks_per_cluster, 0);
XBF_LOCK);
if (!fbuf) if (!fbuf)
return ENOMEM; return ENOMEM;
/* /*
...@@ -610,6 +609,13 @@ xfs_ialloc_get_rec( ...@@ -610,6 +609,13 @@ xfs_ialloc_get_rec(
/* /*
* Visible inode allocation functions. * Visible inode allocation functions.
*/ */
/*
 * Find a free (set) bit in the inode bitmask.
 *
 * In the inobt free mask a set bit marks a free inode; the result is the
 * chunk-relative index of the lowest free inode (presumably a negative
 * value if no bit is set — see xfs_lowbit64; verify against callers).
 */
static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
{
	return xfs_lowbit64(*fp);
}
/* /*
* Allocate an inode on disk. * Allocate an inode on disk.
......
...@@ -46,15 +46,6 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o) ...@@ -46,15 +46,6 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
(xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog)); (xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog));
} }
/*
* Find a free (set) bit in the inode bitmask.
*/
static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
{
return xfs_lowbit64(*fp);
}
/* /*
* Allocate an inode on disk. * Allocate an inode on disk.
* Mode is used to tell whether the new inode will need space, and whether * Mode is used to tell whether the new inode will need space, and whether
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_acl.h" #include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h" #include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
...@@ -123,23 +122,7 @@ xfs_inode_free( ...@@ -123,23 +122,7 @@ xfs_inode_free(
xfs_idestroy_fork(ip, XFS_ATTR_FORK); xfs_idestroy_fork(ip, XFS_ATTR_FORK);
if (ip->i_itemp) { if (ip->i_itemp) {
/* ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
* Only if we are shutting down the fs will we see an
* inode still in the AIL. If it is there, we should remove
* it to prevent a use-after-free from occurring.
*/
xfs_log_item_t *lip = &ip->i_itemp->ili_item;
struct xfs_ail *ailp = lip->li_ailp;
ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
XFS_FORCED_SHUTDOWN(ip->i_mount));
if (lip->li_flags & XFS_LI_IN_AIL) {
spin_lock(&ailp->xa_lock);
if (lip->li_flags & XFS_LI_IN_AIL)
xfs_trans_ail_delete(ailp, lip);
else
spin_unlock(&ailp->xa_lock);
}
xfs_inode_item_destroy(ip); xfs_inode_item_destroy(ip);
ip->i_itemp = NULL; ip->i_itemp = NULL;
} }
...@@ -334,9 +317,10 @@ xfs_iget_cache_miss( ...@@ -334,9 +317,10 @@ xfs_iget_cache_miss(
/* /*
* Preload the radix tree so we can insert safely under the * Preload the radix tree so we can insert safely under the
* write spinlock. Note that we cannot sleep inside the preload * write spinlock. Note that we cannot sleep inside the preload
* region. * region. Since we can be called from transaction context, don't
* recurse into the file system.
*/ */
if (radix_tree_preload(GFP_KERNEL)) { if (radix_tree_preload(GFP_NOFS)) {
error = EAGAIN; error = EAGAIN;
goto out_destroy; goto out_destroy;
} }
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_types.h" #include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h" #include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
...@@ -61,6 +60,20 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); ...@@ -61,6 +60,20 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
/*
* helper function to extract extent size hint from inode
*/
xfs_extlen_t
xfs_get_extsz_hint(
struct xfs_inode *ip)
{
if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
return ip->i_d.di_extsize;
if (XFS_IS_REALTIME_INODE(ip))
return ip->i_mount->m_sb.sb_rextsize;
return 0;
}
#ifdef DEBUG #ifdef DEBUG
/* /*
* Make sure that the extents in the given memory buffer * Make sure that the extents in the given memory buffer
...@@ -137,6 +150,7 @@ xfs_imap_to_bp( ...@@ -137,6 +150,7 @@ xfs_imap_to_bp(
int ni; int ni;
xfs_buf_t *bp; xfs_buf_t *bp;
buf_flags |= XBF_UNMAPPED;
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno, error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
(int)imap->im_len, buf_flags, &bp); (int)imap->im_len, buf_flags, &bp);
if (error) { if (error) {
...@@ -226,7 +240,7 @@ xfs_inotobp( ...@@ -226,7 +240,7 @@ xfs_inotobp(
if (error) if (error)
return error; return error;
error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags); error = xfs_imap_to_bp(mp, tp, &imap, &bp, 0, imap_flags);
if (error) if (error)
return error; return error;
...@@ -782,8 +796,7 @@ xfs_iread( ...@@ -782,8 +796,7 @@ xfs_iread(
/* /*
* Get pointers to the on-disk inode and the buffer containing it. * Get pointers to the on-disk inode and the buffer containing it.
*/ */
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 0, iget_flags);
XBF_LOCK, iget_flags);
if (error) if (error)
return error; return error;
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
...@@ -1342,7 +1355,7 @@ xfs_iunlink( ...@@ -1342,7 +1355,7 @@ xfs_iunlink(
* Here we put the head pointer into our next pointer, * Here we put the head pointer into our next pointer,
* and then we fall through to point the head at us. * and then we fall through to point the head at us.
*/ */
error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
if (error) if (error)
return error; return error;
...@@ -1423,7 +1436,7 @@ xfs_iunlink_remove( ...@@ -1423,7 +1436,7 @@ xfs_iunlink_remove(
* of dealing with the buffer when there is no need to * of dealing with the buffer when there is no need to
* change it. * change it.
*/ */
error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
if (error) { if (error) {
xfs_warn(mp, "%s: xfs_itobp() returned error %d.", xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
__func__, error); __func__, error);
...@@ -1484,7 +1497,7 @@ xfs_iunlink_remove( ...@@ -1484,7 +1497,7 @@ xfs_iunlink_remove(
* Now last_ibp points to the buffer previous to us on * Now last_ibp points to the buffer previous to us on
* the unlinked list. Pull us from the list. * the unlinked list. Pull us from the list.
*/ */
error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
if (error) { if (error) {
xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.", xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
__func__, error); __func__, error);
...@@ -1566,8 +1579,7 @@ xfs_ifree_cluster( ...@@ -1566,8 +1579,7 @@ xfs_ifree_cluster(
* to mark all the active inodes on the buffer stale. * to mark all the active inodes on the buffer stale.
*/ */
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
mp->m_bsize * blks_per_cluster, mp->m_bsize * blks_per_cluster, 0);
XBF_LOCK);
if (!bp) if (!bp)
return ENOMEM; return ENOMEM;
...@@ -1737,7 +1749,7 @@ xfs_ifree( ...@@ -1737,7 +1749,7 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK); error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0);
if (error) if (error)
return error; return error;
...@@ -2347,11 +2359,11 @@ xfs_iflush_cluster( ...@@ -2347,11 +2359,11 @@ xfs_iflush_cluster(
*/ */
rcu_read_unlock(); rcu_read_unlock();
/* /*
* Clean up the buffer. If it was B_DELWRI, just release it -- * Clean up the buffer. If it was delwri, just release it --
* brelse can handle it with no problems. If not, shut down the * brelse can handle it with no problems. If not, shut down the
* filesystem before releasing the buffer. * filesystem before releasing the buffer.
*/ */
bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp); bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
if (bufwasdelwri) if (bufwasdelwri)
xfs_buf_relse(bp); xfs_buf_relse(bp);
...@@ -2377,30 +2389,29 @@ xfs_iflush_cluster( ...@@ -2377,30 +2389,29 @@ xfs_iflush_cluster(
/* /*
* Unlocks the flush lock * Unlocks the flush lock
*/ */
xfs_iflush_abort(iq); xfs_iflush_abort(iq, false);
kmem_free(ilist); kmem_free(ilist);
xfs_perag_put(pag); xfs_perag_put(pag);
return XFS_ERROR(EFSCORRUPTED); return XFS_ERROR(EFSCORRUPTED);
} }
/* /*
* xfs_iflush() will write a modified inode's changes out to the * Flush dirty inode metadata into the backing buffer.
* inode's on disk home. The caller must have the inode lock held *
* in at least shared mode and the inode flush completion must be * The caller must have the inode lock and the inode flush lock held. The
* active as well. The inode lock will still be held upon return from * inode lock will still be held upon return to the caller, and the inode
* the call and the caller is free to unlock it. * flush lock will be released after the inode has reached the disk.
* The inode flush will be completed when the inode reaches the disk. *
* The flags indicate how the inode's buffer should be written out. * The caller must write out the buffer returned in *bpp and release it.
*/ */
int int
xfs_iflush( xfs_iflush(
xfs_inode_t *ip, struct xfs_inode *ip,
uint flags) struct xfs_buf **bpp)
{ {
xfs_inode_log_item_t *iip; struct xfs_mount *mp = ip->i_mount;
xfs_buf_t *bp; struct xfs_buf *bp;
xfs_dinode_t *dip; struct xfs_dinode *dip;
xfs_mount_t *mp;
int error; int error;
XFS_STATS_INC(xs_iflush_count); XFS_STATS_INC(xs_iflush_count);
...@@ -2410,25 +2421,8 @@ xfs_iflush( ...@@ -2410,25 +2421,8 @@ xfs_iflush(
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
iip = ip->i_itemp; *bpp = NULL;
mp = ip->i_mount;
/*
* We can't flush the inode until it is unpinned, so wait for it if we
* are allowed to block. We know no one new can pin it, because we are
* holding the inode lock shared and you need to hold it exclusively to
* pin the inode.
*
* If we are not allowed to block, force the log out asynchronously so
* that when we come back the inode will be unpinned. If other inodes
* in the same cluster are dirty, they will probably write the inode
* out for us if they occur after the log force completes.
*/
if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
xfs_iunpin(ip);
xfs_ifunlock(ip);
return EAGAIN;
}
xfs_iunpin_wait(ip); xfs_iunpin_wait(ip);
/* /*
...@@ -2447,20 +2441,20 @@ xfs_iflush( ...@@ -2447,20 +2441,20 @@ xfs_iflush(
/* /*
* This may have been unpinned because the filesystem is shutting * This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this inode * down forcibly. If that's the case we must not write this inode
* to disk, because the log record didn't make it to disk! * to disk, because the log record didn't make it to disk.
*
* We also have to remove the log item from the AIL in this case,
* as we wait for an empty AIL as part of the unmount process.
*/ */
if (XFS_FORCED_SHUTDOWN(mp)) { if (XFS_FORCED_SHUTDOWN(mp)) {
if (iip) error = XFS_ERROR(EIO);
iip->ili_fields = 0; goto abort_out;
xfs_ifunlock(ip);
return XFS_ERROR(EIO);
} }
/* /*
* Get the buffer containing the on-disk inode. * Get the buffer containing the on-disk inode.
*/ */
error = xfs_itobp(mp, NULL, ip, &dip, &bp, error = xfs_itobp(mp, NULL, ip, &dip, &bp, XBF_TRYLOCK);
(flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
if (error || !bp) { if (error || !bp) {
xfs_ifunlock(ip); xfs_ifunlock(ip);
return error; return error;
...@@ -2488,23 +2482,20 @@ xfs_iflush( ...@@ -2488,23 +2482,20 @@ xfs_iflush(
if (error) if (error)
goto cluster_corrupt_out; goto cluster_corrupt_out;
if (flags & SYNC_WAIT) *bpp = bp;
error = xfs_bwrite(bp); return 0;
else
xfs_buf_delwri_queue(bp);
xfs_buf_relse(bp);
return error;
corrupt_out: corrupt_out:
xfs_buf_relse(bp); xfs_buf_relse(bp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out: cluster_corrupt_out:
error = XFS_ERROR(EFSCORRUPTED);
abort_out:
/* /*
* Unlocks the flush lock * Unlocks the flush lock
*/ */
xfs_iflush_abort(ip); xfs_iflush_abort(ip, false);
return XFS_ERROR(EFSCORRUPTED); return error;
} }
...@@ -2706,27 +2697,6 @@ xfs_iflush_int( ...@@ -2706,27 +2697,6 @@ xfs_iflush_int(
return XFS_ERROR(EFSCORRUPTED); return XFS_ERROR(EFSCORRUPTED);
} }
void
xfs_promote_inode(
struct xfs_inode *ip)
{
struct xfs_buf *bp;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
ip->i_imap.im_len, XBF_TRYLOCK);
if (!bp)
return;
if (XFS_BUF_ISDELAYWRITE(bp)) {
xfs_buf_delwri_promote(bp);
wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
}
xfs_buf_relse(bp);
}
/* /*
* Return a pointer to the extent record at file index idx. * Return a pointer to the extent record at file index idx.
*/ */
......
...@@ -529,11 +529,12 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); ...@@ -529,11 +529,12 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int); void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iunpin_wait(xfs_inode_t *); void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint); int xfs_iflush(struct xfs_inode *, struct xfs_buf **);
void xfs_promote_inode(struct xfs_inode *);
void xfs_lock_inodes(xfs_inode_t **, int, uint); void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
#define IHOLD(ip) \ #define IHOLD(ip) \
do { \ do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
......
This diff is collapsed.
...@@ -165,7 +165,7 @@ extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); ...@@ -165,7 +165,7 @@ extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *); extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *); extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *); extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_iflush_abort(struct xfs_inode *); extern void xfs_iflush_abort(struct xfs_inode *, bool);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
xfs_inode_log_format_t *); xfs_inode_log_format_t *);
......
...@@ -26,11 +26,6 @@ ...@@ -26,11 +26,6 @@
* high agno_log-agblklog-inopblog bits - 0 * high agno_log-agblklog-inopblog bits - 0
*/ */
typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */
#define NULLFSINO ((xfs_ino_t)-1)
#define NULLAGINO ((xfs_agino_t)-1)
struct xfs_mount; struct xfs_mount;
#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1) #define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
......
...@@ -17,9 +17,7 @@ ...@@ -17,9 +17,7 @@
*/ */
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
...@@ -22,9 +22,7 @@ ...@@ -22,9 +22,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "xfs.h" #include "xfs.h"
#include "xfs_fs.h" #include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_sb.h" #include "xfs_sb.h"
#include "xfs_ag.h" #include "xfs_ag.h"
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment