Commit d28d3730 authored by Linus Torvalds

Merge tag 'for-linus-v3.7-rc7' of git://oss.sgi.com/xfs/xfs

Pull xfs bugfixes from Ben Myers:

 - fix attr tree double split corruption

 - fix broken error handling in xfs_vm_writepage

 - drop buffer io reference when a bad bio is built

* tag 'for-linus-v3.7-rc7' of git://oss.sgi.com/xfs/xfs:
  xfs: drop buffer io reference when a bad bio is built
  xfs: fix broken error handling in xfs_vm_writepage
  xfs: fix attr tree double split corruption
parents 5e30c089 d69043c4
fs/xfs/xfs_aops.c
@@ -481,11 +481,17 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  *
  * The fix is two passes across the ioend list - one to start writeback on the
  * buffer_heads, and then submit them for I/O on the second pass.
+ *
+ * If @fail is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked pages for writeback
+ * and unlocked them. In this situation, we need to fail the ioend chain rather
+ * than submit it to IO. This typically only happens on a filesystem shutdown.
  */
 STATIC void
 xfs_submit_ioend(
 	struct writeback_control *wbc,
-	xfs_ioend_t		*ioend)
+	xfs_ioend_t		*ioend,
+	int			fail)
 {
 	xfs_ioend_t		*head = ioend;
 	xfs_ioend_t		*next;
@@ -506,6 +512,18 @@ xfs_submit_ioend(
 		next = ioend->io_list;
 		bio = NULL;
 
+		/*
+		 * If we are failing the IO now, just mark the ioend with an
+		 * error and finish it. This will run IO completion immediately
+		 * as there is only one reference to the ioend at this point in
+		 * time.
+		 */
+		if (fail) {
+			ioend->io_error = -fail;
+			xfs_finish_ioend(ioend);
+			continue;
+		}
+
 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 			if (!bio) {
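
These two hunks add a fail-fast path to the ioend submission loop. A minimal userspace model of the pattern (the struct, submit_chain(), and complete_ioend() below are illustrative stand-ins, not the kernel API): once a prior failure is known, every ioend in the chain is tagged with the error and completed immediately instead of being sent to disk. Because each ioend still holds exactly one reference at this point, completing it inline runs IO completion synchronously, which is what the comment about "only one reference" guarantees.

#include <stdio.h>

/* Illustrative stand-in for the kernel's xfs_ioend_t chain. */
struct ioend {
	struct ioend	*next;		/* io_list analogue */
	int		error;		/* io_error analogue */
	const char	*name;
};

static void complete_ioend(struct ioend *io)
{
	/* completion path sees io->error, like xfs_finish_ioend() */
	printf("%s: completed, error=%d\n", io->name, io->error);
}

static void submit_chain(struct ioend *head, int fail)
{
	struct ioend *io, *next;

	for (io = head; io; io = next) {
		next = io->next;
		if (fail) {
			/* error the ioend and complete it immediately */
			io->error = -fail;
			complete_ioend(io);
			continue;
		}
		printf("%s: submitted for IO\n", io->name);
	}
}

int main(void)
{
	struct ioend b = { NULL, 0, "ioend B" };
	struct ioend a = { &b, 0, "ioend A" };

	submit_chain(&a, 0);	/* normal path: both submitted */
	submit_chain(&a, 5);	/* shutdown-style failure: chain errors out */
	return 0;
}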
@@ -1060,7 +1078,18 @@ xfs_vm_writepage(
 	xfs_start_page_writeback(page, 1, count);
 
-	if (ioend && imap_valid) {
+	/* if there is no IO to be submitted for this page, we are done */
+	if (!ioend)
+		return 0;
+
+	ASSERT(iohead);
+
+	/*
+	 * Any errors from this point onwards need to be reported through the
+	 * IO completion path as we have marked the initial page as under
+	 * writeback and unlocked it.
+	 */
+	if (imap_valid) {
 		xfs_off_t		end_index;
 
 		end_index = imap.br_startoff + imap.br_blockcount;
@@ -1079,20 +1108,15 @@ xfs_vm_writepage(
 				  wbc, end_index);
 	}
 
-	if (iohead) {
-		/*
-		 * Reserve log space if we might write beyond the on-disk
-		 * inode size.
-		 */
-		if (ioend->io_type != XFS_IO_UNWRITTEN &&
-		    xfs_ioend_is_append(ioend)) {
-			err = xfs_setfilesize_trans_alloc(ioend);
-			if (err)
-				goto error;
-		}
-
-		xfs_submit_ioend(wbc, iohead);
-	}
+	/*
+	 * Reserve log space if we might write beyond the on-disk inode size.
+	 */
+	err = 0;
+	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+		err = xfs_setfilesize_trans_alloc(ioend);
+
+	xfs_submit_ioend(wbc, iohead, err);
 
 	return 0;
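
The restructured tail of xfs_vm_writepage() encodes a contract: once the page has been marked for writeback and unlocked, the function may no longer "goto error" (which would unlock the page a second time); any later failure must travel with the IO and surface through the completion path. A hedged sketch of that control flow, using hypothetical helpers rather than the XFS functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel calls; not the XFS API. */
static void start_page_writeback(void)
{
	puts("page marked for writeback and unlocked");
}

static int reserve_log_space(int should_fail)
{
	return should_fail ? ENOSPC : 0;	/* positive errno, XFS style */
}

static void submit_ioend(int fail)
{
	if (fail)
		printf("erroring ioend chain (%d) via IO completion\n", fail);
	else
		puts("submitting ioend chain");
}

/*
 * Model of xfs_vm_writepage() after the fix: past the point of no
 * return, failures are fed into the submission path rather than
 * returned, so the caller never sees an error for a page whose
 * writeback has already started.
 */
static int writepage(int should_fail)
{
	int err;

	start_page_writeback();		/* point of no return */
	err = reserve_log_space(should_fail);
	submit_ioend(err);		/* the error travels with the IO */
	return 0;			/* always 0 from here on */
}

int main(void)
{
	writepage(0);
	writepage(1);
	return 0;
}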
fs/xfs/xfs_attr_leaf.c
@@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	leaf2 = blk2->bp->b_addr;
 	ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
 	ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+	ASSERT(leaf2->hdr.count == 0);
 	args = state->args;
 
 	trace_xfs_attr_leaf_rebalance(args);
@@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		 * I assert that since all callers pass in an empty
 		 * second buffer, this code should never execute.
 		 */
+		ASSERT(0);
 
 		/*
 		 * Figure the total bytes to be added to the destination leaf.
@@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 			args->index2 = 0;
 			args->blkno2 = blk2->blkno;
 		} else {
+			/*
+			 * On a double leaf split, the original attr location
+			 * is already stored in blkno2/index2, so don't
+			 * overwrite it, otherwise we corrupt the tree.
+			 */
 			blk2->index = blk1->index
 				    - be16_to_cpu(leaf1->hdr.count);
-			args->index = args->index2 = blk2->index;
-			args->blkno = args->blkno2 = blk2->blkno;
+			args->index = blk2->index;
+			args->blkno = blk2->blkno;
+			if (!state->extravalid) {
+				/*
+				 * set the new attr location to match the old
+				 * one and let the higher level split code
+				 * decide where in the leaf to place it.
+				 */
+				args->index2 = blk2->index;
+				args->blkno2 = blk2->blkno;
+			}
 		}
 	} else {
 		ASSERT(state->inleaf == 1);
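
The guard in this hunk is about not clobbering caller state. A compact model of the idea, with a hypothetical rebalance() standing in for xfs_attr_leaf_rebalance() and extravalid playing the role of state->extravalid: on a double leaf split, an earlier split has already recorded the original attr's location in index2/blkno2, so the rebalance only fills those fields when they are not already valid.

#include <stdio.h>

/* Minimal model of the da_args second-location fields. */
struct da_args {
	int	index, blkno;		/* new attr location */
	int	index2, blkno2;		/* original attr location */
};

static void rebalance(struct da_args *args, int extravalid,
		      int new_index, int new_blkno)
{
	args->index = new_index;
	args->blkno = new_blkno;

	/*
	 * Only point index2/blkno2 at the new location when no earlier
	 * split has stored one; otherwise we would lose the record of
	 * where the original attr went (the corruption the fix stops).
	 */
	if (!extravalid) {
		args->index2 = new_index;
		args->blkno2 = new_blkno;
	}
}

int main(void)
{
	struct da_args args = { 0, 0, 7, 42 };	/* 7/42 set by a prior split */

	rebalance(&args, 1, 3, 99);	/* double split: 7/42 preserved */
	printf("index2=%d blkno2=%d\n", args.index2, args.blkno2);
	return 0;
}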
fs/xfs/xfs_buf.c
@@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io(
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
 
-	xfs_buf_ioerror(bp, -error);
+	/*
+	 * don't overwrite existing errors - otherwise we can lose errors on
+	 * buffers that require multiple bios to complete.
+	 */
+	if (!bp->b_error)
+		xfs_buf_ioerror(bp, -error);
 
-	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
 	_xfs_buf_ioend(bp, 1);
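
This first xfs_buf.c hunk is the classic first-error-wins rule for split IOs: a buffer may be completed by several bios, and a later successful (or differently failed) bio must not clear the error recorded by an earlier one. A small self-contained model of that rule:

#include <errno.h>
#include <stdio.h>

/* b_error/b_io_remaining analogues on a buffer completed by three bios */
struct buf {
	int	error;
	int	remaining;
};

static void bio_end_io(struct buf *bp, int error)
{
	/* keep the first error; later completions must not clobber it */
	if (!bp->error)
		bp->error = error;
	if (--bp->remaining == 0)
		printf("buffer done, error=%d\n", bp->error);
}

int main(void)
{
	struct buf bp = { 0, 3 };

	bio_end_io(&bp, 0);
	bio_end_io(&bp, -EIO);	/* first error is recorded... */
	bio_end_io(&bp, 0);	/* ...and a later success cannot clear it */
	return 0;
}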
@@ -1279,6 +1284,11 @@ xfs_buf_ioapply_map(
 		if (size)
 			goto next_chunk;
 	} else {
+		/*
+		 * This is guaranteed not to be the last io reference count
+		 * because the caller (xfs_buf_iorequest) holds a count itself.
+		 */
+		atomic_dec(&bp->b_io_remaining);
 		xfs_buf_ioerror(bp, EIO);
 		bio_put(bio);
 	}
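
The second hunk balances the reference counting when bio construction fails: the submission path takes an io reference per bio, and a bio that is never submitted will never complete, so its reference has to be dropped by hand or the buffer hangs forever. A sketch of the accounting, with b_io_remaining modeled as a plain counter and ioapply()/ioend() as hypothetical stand-ins:

#include <errno.h>
#include <stdio.h>

/* b_io_remaining modeled as a plain counter; the caller holds one count */
struct buf {
	int	io_remaining;
	int	error;
};

static void ioend(struct buf *bp)
{
	if (--bp->io_remaining == 0)
		printf("all IO complete, error=%d\n", bp->error);
}

static void ioapply(struct buf *bp, int bio_ok)
{
	bp->io_remaining++;	/* count for the bio about to be built */
	if (bio_ok) {
		ioend(bp);	/* stands in for bio completion */
	} else {
		/*
		 * Bad bio: it will never complete, so drop the count we
		 * just took. This cannot be the last count because the
		 * caller still holds its own.
		 */
		bp->io_remaining--;
		bp->error = -EIO;
	}
}

int main(void)
{
	struct buf bp = { 1, 0 };	/* caller's own count */

	ioapply(&bp, 0);	/* bad bio: reference dropped, no hang */
	ioend(&bp);		/* caller drops its count; buffer completes */
	return 0;
}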