Commit 1538a093 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: add checksum calculation when clearing UNINIT flag in ext4_new_inode
  ext4: Mark the buffer_heads as dirty and uptodate after prepare_write
  ext4: calculate journal credits correctly
  ext4: wait on all pending commits in ext4_sync_fs()
  ext4: Convert to host order before using the values.
  ext4: fix missing ext4_unlock_group in error path
  jbd2: deregister proc on failure in jbd2_journal_init_inode
  jbd2: don't give up looking for space so easily in __jbd2_log_wait_for_space
  jbd: don't give up looking for space so easily in __log_wait_for_space
parents 4bab0ea1 23712a9c
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -718,6 +718,8 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 			free = ext4_free_blocks_after_init(sb, group, gdp);
 			gdp->bg_free_blocks_count = cpu_to_le16(free);
+			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
+								gdp);
 		}
 		spin_unlock(sb_bgl_lock(sbi, group));
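The hunk above recomputes the group descriptor checksum after clearing the UNINIT flag and refreshing the free-block count; without it, a later checksum verification would flag the group as corrupt. Below is a minimal user-space sketch of that invariant. `desc_csum()` is a toy stand-in for ext4_group_desc_csum(), not the real crc16-based routine.

```c
#include <stdint.h>
#include <stdio.h>

struct gdesc {
	uint16_t bg_flags;
	uint16_t bg_free_blocks_count;
	uint16_t bg_checksum;
};

static uint16_t desc_csum(const struct gdesc *g)
{
	/* toy checksum over every field except bg_checksum itself */
	return (uint16_t)(g->bg_flags * 31 + g->bg_free_blocks_count * 17);
}

int main(void)
{
	struct gdesc g = { .bg_flags = 0x0002 /* "UNINIT" */,
			   .bg_free_blocks_count = 0 };
	g.bg_checksum = desc_csum(&g);

	/* the buggy path: clear the flag and update the count... */
	g.bg_flags &= ~0x0002;
	g.bg_free_blocks_count = 1000;
	printf("stale csum valid? %s\n",
	       g.bg_checksum == desc_csum(&g) ? "yes" : "no (corruption!)");

	/* ...the fix: recompute the checksum before dropping the lock */
	g.bg_checksum = desc_csum(&g);
	printf("fresh csum valid? %s\n",
	       g.bg_checksum == desc_csum(&g) ? "yes" : "no");
	return 0;
}
```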
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2329,6 +2329,8 @@ static int ext4_da_writepage(struct page *page,
 			unlock_page(page);
 			return 0;
 		}
+		/* now mark the buffer_heads as dirty and uptodate */
+		block_commit_write(page, 0, PAGE_CACHE_SIZE);
 	}
 
 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
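For context on this hunk: writeback only writes buffers that are both uptodate and dirty, so buffers that were just re-prepared must have those flags set again, which is what block_commit_write() does across the page. The toy model below shows only that flag invariant; the BH_* bits and the writeback check are simplified stand-ins, not the kernel's buffer_head API.

```c
#include <stdio.h>

#define BH_UPTODATE (1u << 0)
#define BH_DIRTY    (1u << 1)

struct buffer { unsigned flags; };

static int writeback_would_write(const struct buffer *bh)
{
	return (bh->flags & BH_UPTODATE) && (bh->flags & BH_DIRTY);
}

int main(void)
{
	/* a re-prepared buffer that was never re-flagged as dirty */
	struct buffer bh = { .flags = BH_UPTODATE };

	printf("before commit: written? %d\n", writeback_would_write(&bh));
	bh.flags |= BH_UPTODATE | BH_DIRTY; /* what the commit step marks */
	printf("after commit:  written? %d\n", writeback_would_write(&bh));
	return 0;
}
```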
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4580,9 +4582,10 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 {
 	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
-		return ext4_indirect_trans_blocks(inode, nrblocks, 0);
-	return ext4_ext_index_trans_blocks(inode, nrblocks, 0);
+		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
+	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
 }
 
 /*
  * Account for index blocks, block groups bitmaps and block group
  * descriptor blocks if modify datablocks and index blocks
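This hunk stops discarding the caller's `chunk` flag when estimating journal credits. The sketch below illustrates, with made-up numbers, why that matters: a contiguous chunk shares index blocks, so treating every block as scattered (chunk = 0) over-reserves credits, and a large enough over-request can exceed what the journal allows for a single transaction. The arithmetic here is illustrative only, not ext4's real credit formula.

```c
#include <stdio.h>

static int indirect_trans_blocks(int nrblocks, int chunk)
{
	if (chunk) {
		/* one contiguous region: assume 1024 pointers per index
		 * block, plus a few blocks for the upper index levels */
		return (nrblocks + 1023) / 1024 + 3;
	}
	/* worst case: every block may sit in a different indirect chain */
	return nrblocks * 4;
}

int main(void)
{
	int n = 2048;

	printf("credits, contiguous chunk: %d\n", indirect_trans_blocks(n, 1));
	printf("credits, scattered blocks: %d\n", indirect_trans_blocks(n, 0));
	printf("hard-coding chunk=0 reserves %d where %d would do\n",
	       indirect_trans_blocks(n, 0), indirect_trans_blocks(n, 1));
	return 0;
}
```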
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4441,6 +4441,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
 		else if (block >= (entry->start_blk + entry->count))
 			n = &(*n)->rb_right;
 		else {
+			ext4_unlock_group(sb, group);
 			ext4_error(sb, __func__,
 				   "Double free of blocks %d (%d %d)\n",
 				   block, entry->start_blk, entry->count);
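The mballoc.c hunk drops the group lock before reporting the double free: ext4_error() may write the superblock, remount read-only, or panic, none of which can safely happen under a held spinlock. A user-space pthread sketch of the pattern, with stand-ins for the ext4 calls:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

static void fs_error(const char *msg)
{
	/* like ext4_error(): may log, remount read-only, or never return;
	 * if it aborted with group_lock held, no one could ever unlock */
	fprintf(stderr, "fs error: %s\n", msg);
}

static void free_blocks(int already_freed)
{
	pthread_mutex_lock(&group_lock);
	if (already_freed) {
		pthread_mutex_unlock(&group_lock); /* the fix: unlock first */
		fs_error("double free of blocks");
		return;
	}
	/* ... normal insertion into the free-extent tree ... */
	pthread_mutex_unlock(&group_lock);
}

int main(void)
{
	free_blocks(1);
	return 0;
}
```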
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1458,9 +1458,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
 	/* We allocate both existing and potentially added groups */
 	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
-			((sbi->s_es->s_reserved_gdt_blocks +1 ) <<
-			EXT4_DESC_PER_BLOCK_BITS(sb))) /
-			groups_per_flex;
+			((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
+			EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
 	sbi->s_flex_groups = kzalloc(flex_group_count *
 				     sizeof(struct flex_groups), GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
@@ -2885,12 +2884,9 @@ int ext4_force_commit(struct super_block *sb)
 /*
  * Ext4 always journals updates to the superblock itself, so we don't
  * have to propagate any other updates to the superblock on disk at this
- * point. Just start an async writeback to get the buffers on their way
- * to the disk.
- *
- * This implicitly triggers the writebehind on sync().
+ * point. (We can probably nuke this function altogether, and remove
+ * any mention to sb->s_dirt in all of fs/ext4; eventual cleanup...)
  */
 static void ext4_write_super(struct super_block *sb)
 {
 	if (mutex_trylock(&sb->s_lock) != 0)
@@ -2900,15 +2896,15 @@ static void ext4_write_super(struct super_block *sb)
 static int ext4_sync_fs(struct super_block *sb, int wait)
 {
-	tid_t target;
+	int ret = 0;
 
 	trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
 	sb->s_dirt = 0;
-	if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) {
-		if (wait)
-			jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target);
-	}
-	return 0;
+	if (wait)
+		ret = ext4_force_commit(sb);
+	else
+		jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+	return ret;
 }
 
 /*
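Two of the super.c changes are worth unpacking. ext4_sync_fs() now forces and waits for a full commit whenever `wait` is set, instead of only waiting when a commit happened to be starting. And ext4_fill_flex_info() now byte-swaps s_reserved_gdt_blocks, which is stored little-endian on disk, before doing arithmetic with it. The sketch below decodes the same two on-disk bytes both ways to show what an unconverted read would compute on a big-endian host; the shift count assumes 4K blocks with 32-byte descriptors (EXT4_DESC_PER_BLOCK_BITS == 7), an assumption made purely for illustration.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* on-disk (little-endian) encoding of s_reserved_gdt_blocks = 32 */
	const uint8_t disk[2] = { 0x20, 0x00 };

	uint16_t host   = disk[0] | (disk[1] << 8);  /* le16_to_cpu() */
	uint16_t raw_be = (disk[0] << 8) | disk[1];  /* the old bug on BE */

	/* assume 4K blocks, 32-byte descriptors: 128 descs/block = 2^7 */
	printf("converted: %u -> %u extra groups\n",
	       host, (unsigned)(host + 1) << 7);
	printf("raw on big-endian: %u -> %u extra groups (bogus kzalloc)\n",
	       raw_be, (unsigned)(raw_be + 1) << 7);
	return 0;
}
```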
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -115,7 +115,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
  */
 void __log_wait_for_space(journal_t *journal)
 {
-	int nblocks;
+	int nblocks, space_left;
 	assert_spin_locked(&journal->j_state_lock);
 
 	nblocks = jbd_space_needed(journal);
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
 		/*
 		 * Test again, another process may have checkpointed while we
 		 * were waiting for the checkpoint lock. If there are no
-		 * outstanding transactions there is nothing to checkpoint and
-		 * we can't make progress. Abort the journal in this case.
+		 * transactions ready to be checkpointed, try to recover
+		 * journal space by calling cleanup_journal_tail(), and if
+		 * that doesn't work, by waiting for the currently committing
+		 * transaction to complete.  If there is absolutely no way
+		 * to make progress, this is either a BUG or corrupted
+		 * filesystem, so abort the journal and leave a stack
+		 * trace for forensic evidence.
 		 */
 		spin_lock(&journal->j_state_lock);
 		spin_lock(&journal->j_list_lock);
 		nblocks = jbd_space_needed(journal);
-		if (__log_space_left(journal) < nblocks) {
+		space_left = __log_space_left(journal);
+		if (space_left < nblocks) {
 			int chkpt = journal->j_checkpoint_transactions != NULL;
+			tid_t tid = 0;
+
+			if (journal->j_committing_transaction)
+				tid = journal->j_committing_transaction->t_tid;
 			spin_unlock(&journal->j_list_lock);
 			spin_unlock(&journal->j_state_lock);
 			if (chkpt) {
 				log_do_checkpoint(journal);
+			} else if (cleanup_journal_tail(journal) == 0) {
+				/* We were able to recover space; yay! */
+				;
+			} else if (tid) {
+				log_wait_commit(journal, tid);
 			} else {
-				printk(KERN_ERR "%s: no transactions\n",
-				       __func__);
+				printk(KERN_ERR "%s: needed %d blocks and "
+				       "only had %d space available\n",
+				       __func__, nblocks, space_left);
+				printk(KERN_ERR "%s: no way to get more "
+				       "journal space\n", __func__);
+				WARN_ON(1);
 				journal_abort(journal, 0);
 			}
 			spin_lock(&journal->j_state_lock);
 		} else {
 			spin_unlock(&journal->j_list_lock);
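Restated outside the kernel, the new __log_wait_for_space() behaviour is a four-step fallback rather than an immediate abort: checkpoint old transactions, else try to advance the journal tail, else wait for the running commit, and only abort when none of those can free space. The stand-alone sketch below mirrors only that decision ladder; every helper is a stub named after, but not identical to, the real jbd call.

```c
#include <stdbool.h>
#include <stdio.h>

static bool have_checkpoint_txns;     /* j_checkpoint_transactions != NULL */
static int committing_tid = 42;       /* 0 when nothing is committing */

static void do_checkpoint(void)       { puts("1: flush checkpoint list"); }
static int cleanup_journal_tail(void) { return -1; /* 0 == freed space */ }
static void wait_commit(int tid)      { printf("3: wait for commit %d\n", tid); }
static void journal_abort(void)       { puts("4: genuinely stuck: abort"); }

static void wait_for_space(int needed, int space_left)
{
	if (space_left >= needed)
		return;
	if (have_checkpoint_txns)
		do_checkpoint();             /* checkpoint old transactions */
	else if (cleanup_journal_tail() == 0)
		;                            /* tail advance freed log space */
	else if (committing_tid)
		wait_commit(committing_tid); /* let the running commit finish */
	else
		journal_abort();             /* only now is it hopeless */
}

int main(void)
{
	wait_for_space(128, 16);             /* prints step 3 with these stubs */
	return 0;
}
```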
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -116,7 +116,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
  */
 void __jbd2_log_wait_for_space(journal_t *journal)
 {
-	int nblocks;
+	int nblocks, space_left;
 	assert_spin_locked(&journal->j_state_lock);
 
 	nblocks = jbd_space_needed(journal);
@@ -129,25 +129,43 @@ void __jbd2_log_wait_for_space(journal_t *journal)
 		/*
 		 * Test again, another process may have checkpointed while we
 		 * were waiting for the checkpoint lock. If there are no
-		 * outstanding transactions there is nothing to checkpoint and
-		 * we can't make progress. Abort the journal in this case.
+		 * transactions ready to be checkpointed, try to recover
+		 * journal space by calling cleanup_journal_tail(), and if
+		 * that doesn't work, by waiting for the currently committing
+		 * transaction to complete.  If there is absolutely no way
+		 * to make progress, this is either a BUG or corrupted
+		 * filesystem, so abort the journal and leave a stack
+		 * trace for forensic evidence.
 		 */
 		spin_lock(&journal->j_state_lock);
 		spin_lock(&journal->j_list_lock);
 		nblocks = jbd_space_needed(journal);
-		if (__jbd2_log_space_left(journal) < nblocks) {
+		space_left = __jbd2_log_space_left(journal);
+		if (space_left < nblocks) {
 			int chkpt = journal->j_checkpoint_transactions != NULL;
+			tid_t tid = 0;
+
+			if (journal->j_committing_transaction)
+				tid = journal->j_committing_transaction->t_tid;
 			spin_unlock(&journal->j_list_lock);
 			spin_unlock(&journal->j_state_lock);
 			if (chkpt) {
 				jbd2_log_do_checkpoint(journal);
+			} else if (jbd2_cleanup_journal_tail(journal) == 0) {
+				/* We were able to recover space; yay! */
+				;
+			} else if (tid) {
+				jbd2_log_wait_commit(journal, tid);
 			} else {
-				printk(KERN_ERR "%s: no transactions\n",
-				       __func__);
+				printk(KERN_ERR "%s: needed %d blocks and "
+				       "only had %d space available\n",
+				       __func__, nblocks, space_left);
+				printk(KERN_ERR "%s: no way to get more "
+				       "journal space in %s\n", __func__,
+				       journal->j_devname);
+				WARN_ON(1);
 				jbd2_journal_abort(journal, 0);
 			}
 			spin_lock(&journal->j_state_lock);
 		} else {
 			spin_unlock(&journal->j_list_lock);
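The jbd2/checkpoint.c hunk is the same four-step fallback with the jbd2_-prefixed helpers; the only substantive difference is that its final diagnostic also prints journal->j_devname, so the ladder sketch above applies to both versions unchanged.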
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1089,6 +1089,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
 	if (!journal->j_wbuf) {
 		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
 			__func__);
+		jbd2_stats_proc_exit(journal);
 		kfree(journal);
 		return NULL;
 	}
@@ -1098,6 +1099,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
 	if (err) {
 		printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
 			__func__);
+		jbd2_stats_proc_exit(journal);
 		kfree(journal);
 		return NULL;
 	}
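Both journal.c hunks plug the same leak: jbd2_journal_init_inode() registers per-journal /proc entries early, so every later failure path must unregister them before freeing the journal. Below is a user-space sketch of the goto-unwind shape such error paths usually take (the kernel hunks inline the cleanup instead); the helpers are stubs, not the jbd2 API.

```c
#include <stdio.h>
#include <stdlib.h>

struct journal { void *proc_entry; unsigned long *wbuf; };

static void stats_proc_init(struct journal *j) { j->proc_entry = j; }
static void stats_proc_exit(struct journal *j) { j->proc_entry = NULL; }

static struct journal *journal_init(int fail_wbuf)
{
	struct journal *j = calloc(1, sizeof(*j));

	if (!j)
		return NULL;
	stats_proc_init(j);                  /* registered before any failure */

	j->wbuf = fail_wbuf ? NULL : calloc(64, sizeof(*j->wbuf));
	if (!j->wbuf)
		goto out_err;    /* before the fix: proc entry leaked here */

	return j;

out_err:
	stats_proc_exit(j);      /* the added jbd2_stats_proc_exit() call */
	free(j);
	return NULL;
}

int main(void)
{
	if (!journal_init(1))
		puts("init failed, proc entry cleaned up");
	return 0;
}
```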