Commit c4a0c46e authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Theodore Ts'o

ext4: invalidate pages if delalloc block allocation fails.

We are a bit aggressive in invalidating all the pages. But
it is ok because we really don't know why the block allocation
failed and it is better to come out of the writeback path
so that the user can look for more info.
Signed-off-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
parent af5bc92d
...@@ -1783,6 +1783,39 @@ static inline void __unmap_underlying_blocks(struct inode *inode, ...@@ -1783,6 +1783,39 @@ static inline void __unmap_underlying_blocks(struct inode *inode,
unmap_underlying_metadata(bdev, bh->b_blocknr + i); unmap_underlying_metadata(bdev, bh->b_blocknr + i);
} }
/*
 * ext4_da_block_invalidatepages - invalidate page cache pages after a
 * failed delayed-allocation block allocation.
 *
 * @mpd:     writeback state for this extent (supplies the inode/mapping)
 * @logical: first logical block of the failed extent
 * @blk_cnt: number of blocks in the failed extent
 *
 * Walks every page covering blocks [logical, logical + blk_cnt) and drops
 * it from the page cache so the writeback path is abandoned cleanly and a
 * subsequent read will refetch (zero-filled) data rather than expose stale
 * delalloc state.  Pages are expected to still be locked and not under
 * writeback by the caller's extent-mapping path, hence the BUG_ON checks.
 */
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
			sector_t logical, long blk_cnt)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* Convert the block range into an inclusive page-index range. */
	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (logical + blk_cnt - 1) >>
				(PAGE_CACHE_SHIFT - inode->i_blkbits);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			/* Resume the scan after the last page we processed. */
			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			/* Offset 0: invalidate the whole page, not a tail. */
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		/*
		 * Drop the page references taken by pagevec_lookup();
		 * without this each invalidated page leaks a reference
		 * and can never be freed.
		 */
		pagevec_release(&pvec);
	}
	return;
}
/* /*
* mpage_da_map_blocks - go through given space * mpage_da_map_blocks - go through given space
* *
...@@ -1792,7 +1825,7 @@ static inline void __unmap_underlying_blocks(struct inode *inode, ...@@ -1792,7 +1825,7 @@ static inline void __unmap_underlying_blocks(struct inode *inode,
* The function skips space we know is already mapped to disk blocks. * The function skips space we know is already mapped to disk blocks.
* *
*/ */
static void mpage_da_map_blocks(struct mpage_da_data *mpd) static int mpage_da_map_blocks(struct mpage_da_data *mpd)
{ {
int err = 0; int err = 0;
struct buffer_head *lbh = &mpd->lbh; struct buffer_head *lbh = &mpd->lbh;
...@@ -1803,7 +1836,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd) ...@@ -1803,7 +1836,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
* We consider only non-mapped and non-allocated blocks * We consider only non-mapped and non-allocated blocks
*/ */
if (buffer_mapped(lbh) && !buffer_delay(lbh)) if (buffer_mapped(lbh) && !buffer_delay(lbh))
return; return 0;
new.b_state = lbh->b_state; new.b_state = lbh->b_state;
new.b_blocknr = 0; new.b_blocknr = 0;
...@@ -1814,10 +1847,38 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd) ...@@ -1814,10 +1847,38 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
* to write simply return * to write simply return
*/ */
if (!new.b_size) if (!new.b_size)
return; return 0;
err = mpd->get_block(mpd->inode, next, &new, 1); err = mpd->get_block(mpd->inode, next, &new, 1);
if (err) if (err) {
return;
/* If get block returns with error
* we simply return. Later writepage
* will redirty the page and writepages
* will find the dirty page again
*/
if (err == -EAGAIN)
return 0;
/*
* get block failure will cause us
* to loop in writepages. Because
* a_ops->writepage won't be able to
* make progress. The page will be redirtied
* by writepage and writepages will again
* try to write the same.
*/
printk(KERN_EMERG "%s block allocation failed for inode %lu "
"at logical offset %llu with max blocks "
"%zd with error %d\n",
__func__, mpd->inode->i_ino,
(unsigned long long)next,
lbh->b_size >> mpd->inode->i_blkbits, err);
printk(KERN_EMERG "This should not happen.!! "
"Data will be lost\n");
/* invalidate all the pages */
ext4_da_block_invalidatepages(mpd, next,
lbh->b_size >> mpd->inode->i_blkbits);
return err;
}
BUG_ON(new.b_size == 0); BUG_ON(new.b_size == 0);
if (buffer_new(&new)) if (buffer_new(&new))
...@@ -1830,7 +1891,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd) ...@@ -1830,7 +1891,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
if (buffer_delay(lbh) || buffer_unwritten(lbh)) if (buffer_delay(lbh) || buffer_unwritten(lbh))
mpage_put_bnr_to_bhs(mpd, next, &new); mpage_put_bnr_to_bhs(mpd, next, &new);
return; return 0;
} }
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
...@@ -1899,8 +1960,8 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, ...@@ -1899,8 +1960,8 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
* We couldn't merge the block to our extent, so we * We couldn't merge the block to our extent, so we
* need to flush current extent and start new one * need to flush current extent and start new one
*/ */
mpage_da_map_blocks(mpd); if (mpage_da_map_blocks(mpd) == 0)
mpage_da_submit_io(mpd); mpage_da_submit_io(mpd);
mpd->io_done = 1; mpd->io_done = 1;
return; return;
} }
...@@ -1942,8 +2003,8 @@ static int __mpage_da_writepage(struct page *page, ...@@ -1942,8 +2003,8 @@ static int __mpage_da_writepage(struct page *page,
* and start IO on them using writepage() * and start IO on them using writepage()
*/ */
if (mpd->next_page != mpd->first_page) { if (mpd->next_page != mpd->first_page) {
mpage_da_map_blocks(mpd); if (mpage_da_map_blocks(mpd) == 0)
mpage_da_submit_io(mpd); mpage_da_submit_io(mpd);
/* /*
* skip rest of the page in the page_vec * skip rest of the page in the page_vec
*/ */
...@@ -2046,8 +2107,8 @@ static int mpage_da_writepages(struct address_space *mapping, ...@@ -2046,8 +2107,8 @@ static int mpage_da_writepages(struct address_space *mapping,
* Handle last extent of pages * Handle last extent of pages
*/ */
if (!mpd.io_done && mpd.next_page != mpd.first_page) { if (!mpd.io_done && mpd.next_page != mpd.first_page) {
mpage_da_map_blocks(&mpd); if (mpage_da_map_blocks(&mpd) == 0)
mpage_da_submit_io(&mpd); mpage_da_submit_io(&mpd);
} }
wbc->nr_to_write = to_write - mpd.pages_written; wbc->nr_to_write = to_write - mpd.pages_written;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment