Commit ae6ddcc5 authored by Mingming Cao, committed by Linus Torvalds

[PATCH] ext3 and jbd cleanup: remove whitespace

Remove whitespace from ext3 and jbd, before we clone ext4.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e7ab8d65
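The patch does not say how the trailing whitespace was stripped (most likely an editor or a one-off script). Purely as an illustration of the transformation the hunks below perform, a minimal standalone C filter (hypothetical, not part of this commit) could look like:

/*
 * strip-ws.c: remove trailing spaces and tabs from each line of stdin,
 * writing the cleaned text to stdout. Illustrative sketch only; lines
 * longer than the buffer are simply processed in chunks.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];

	while (fgets(line, sizeof(line), stdin)) {
		size_t len = strlen(line);
		int had_newline = len && line[len - 1] == '\n';

		if (had_newline)
			line[--len] = '\0';
		/* drop trailing spaces and tabs */
		while (len && (line[len - 1] == ' ' || line[len - 1] == '\t'))
			line[--len] = '\0';
		fputs(line, stdout);
		if (had_newline)
			putchar('\n');
	}
	return 0;
}

Hypothetical usage: cc -o strip-ws strip-ws.c && ./strip-ws < fs/ext3/balloc.c > balloc.c.clean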
@@ -74,7 +74,7 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
}
/*
* Read the bitmap for a given block_group, reading into the specified
* slot in the superblock's bitmap cache.
*
* Return buffer_head on success or NULL in case of failure.
@@ -419,8 +419,8 @@ void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
}
/* @@@ This prevents newly-allocated data from being
* freed and then reallocated within the same
* transaction.
*
* Ideally we would want to allow that to happen, but to
* do so requires making journal_forget() capable of
* revoking the queued write of a data block, which
@@ -433,7 +433,7 @@ void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
* safe not to set the allocation bit in the committed
* bitmap, because we know that there is no outstanding
* activity on the buffer any more and so it is safe to
* reallocate it.
*/
BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
J_ASSERT_BH(bitmap_bh,
@@ -518,7 +518,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
* data would allow the old block to be overwritten before the
* transaction committed (because we force data to disk before commit).
* This would lead to corruption if we crashed between overwriting the
* data and committing the delete.
*
* @@@ We may want to make this allocation behaviour conditional on
* data-writes at some point, and disable it for metadata allocations or
@@ -584,7 +584,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
if (start > 0) {
/*
* The goal was occupied; search forward for a free
* block within the next XX blocks.
*
* end_goal is more or less random, but it has to be
@@ -1194,7 +1194,7 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
/*
* ext3_new_block uses a goal block to assist allocation. If the goal is
* free, or there is a free block within 32 blocks of the goal, that block
* is allocated. Otherwise a forward search is made for a free block; within
* each block group the search first looks for an entire free byte in the block
* bitmap, and then for any free bit if that fails.
* This function also updates quota and i_blocks field.
@@ -1303,7 +1303,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
smp_rmb();
/*
* Now search the rest of the groups. We assume that
* i and gdp correctly point to the last group visited.
*/
for (bgi = 0; bgi < ngroups; bgi++) {
...
@@ -20,7 +20,7 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
unsigned int i;
unsigned long sum = 0;
if (!map)
return (0);
for (i = 0; i < numchars; i++)
sum += nibblemap[map->b_data[i] & 0xf] +
...
@@ -59,7 +59,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext3_filetype_table[filetype]);
}
int ext3_check_dir_entry (const char * function, struct inode * dir,
struct ext3_dir_entry_2 * de,
@@ -162,7 +162,7 @@ static int ext3_readdir(struct file * filp,
* to make sure. */
if (filp->f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext3_dir_entry_2 *)
(bh->b_data + i);
/* It's too expensive to do a full
* dirent test each time round this
@@ -181,7 +181,7 @@ static int ext3_readdir(struct file * filp,
filp->f_version = inode->i_version;
}
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
@@ -229,7 +229,7 @@ static int ext3_readdir(struct file * filp,
/*
* These functions convert from the major/minor hash to an f_pos
* value.
*
* Currently we only use major hash numer. This is unfortunate, but
* on 32-bit machines, the same VFS interface is used for lseek and
* llseek, so if we use the 64 bit offset, then the 32-bit versions of
@@ -250,7 +250,7 @@ static int ext3_readdir(struct file * filp,
struct fname {
__u32 hash;
__u32 minor_hash;
struct rb_node rb_hash;
struct fname *next;
__u32 inode;
__u8 name_len;
@@ -410,7 +410,7 @@ static int call_filldir(struct file * filp, void * dirent,
curr_pos = hash2pos(fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
fname->name_len, curr_pos,
fname->inode,
get_dtype(sb, fname->file_type));
if (error) {
@@ -465,7 +465,7 @@ static int ext3_dx_readdir(struct file * filp,
/*
* Fill the rbtree if we have no more entries,
* or the inode has changed since we last read in the
* cached entries.
*/
if ((!info->curr_node) ||
(filp->f_version != inode->i_version)) {
...
@@ -100,7 +100,7 @@ ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t
force_commit:
err = ext3_force_commit(inode->i_sb);
if (err)
return err;
return ret;
}
...
@@ -8,14 +8,14 @@
* Universite Pierre et Marie Curie (Paris VI)
* from
* linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
*
* ext3fs fsync primitive
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
* Removed unnecessary code duplication for little endian machines
* and excessive __inline__s.
* Andi Kleen, 1997
*
* Major simplications and cleanup - we only need to do the metadata, because
...
@@ -4,7 +4,7 @@
* Copyright (C) 2002 by Theodore Ts'o
*
* This file is released under the GPL v2.
*
* This file may be redistributed under the terms of the GNU Public
* License.
*/
@@ -80,11 +80,11 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
* Returns the hash of a filename. If len is 0 and name is NULL, then
* this function can be used to test whether or not a hash version is
* supported.
*
* The seed is an 4 longword (32 bits) "secret" which can be used to
* uniquify a hash. If the seed is all zero's, then some default seed
* may be used.
*
* A particular hash version specifies whether or not the seed is
* represented, and whether or not the returned hash is 32 bits or 64
* bits. 32 bit hashes will return 0 for the minor hash.
...
@@ -216,7 +216,7 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
continue;
if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
continue;
if (!best_desc ||
(le16_to_cpu(desc->bg_free_blocks_count) >
le16_to_cpu(best_desc->bg_free_blocks_count))) {
best_group = group;
@@ -226,30 +226,30 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
return best_group;
}
/*
* Orlov's allocator for directories.
*
* We always try to spread first-level directories.
*
* If there are blockgroups with both free inodes and free blocks counts
* not worse than average we return one with smallest directory count.
* Otherwise we simply return a random group.
*
* For the rest rules look so:
*
* It's OK to put directory into a group unless
* it has too many directories already (max_dirs) or
* it has too few free inodes left (min_inodes) or
* it has too few free blocks left (min_blocks) or
* it's already running too large debt (max_debt).
* Parent's group is prefered, if it doesn't satisfy these
* conditions we search cyclically through the rest. If none
* of the groups look good we just look for a group with more
* free inodes than average (starting at parent's group).
*
* Debt is incremented each time we allocate a directory and decremented
* when we allocate an inode, within 0--255.
*/
#define INODE_COST 64
#define BLOCK_COST 256
@@ -454,7 +454,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
group = find_group_dir(sb, dir);
else
group = find_group_orlov(sb, dir);
} else
group = find_group_other(sb, dir);
err = -ENOSPC;
...
@@ -55,7 +55,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
/*
* The ext3 forget function must perform a revoke if we are freeing data
* which has been journaled. Metadata (eg. indirect blocks) must be
* revoked in all cases.
*
* "bh" may be NULL: a metadata block may have been freed from memory
* but there may still be a record of it in the journal, and that record
@@ -105,7 +105,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
* Work out how many blocks we need to proceed with the next chunk of a
* truncate transaction.
*/
static unsigned long blocks_for_truncate(struct inode *inode)
{
unsigned long needed;
@@ -122,13 +122,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)
/* But we need to bound the transaction so we don't overflow the
* journal. */
if (needed > EXT3_MAX_TRANS_DATA)
needed = EXT3_MAX_TRANS_DATA;
return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
/*
* Truncate transactions can be complex and absolutely huge. So we need to
* be able to restart the transaction at a conventient checkpoint to make
* sure we don't overflow the journal.
@@ -136,9 +136,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
* start_transaction gets us a new handle for a truncate transaction,
* and extend_transaction tries to extend the existing one a bit. If
* extend fails, we need to propagate the failure up and restart the
* transaction in the top-level truncate loop. --sct
*/
static handle_t *start_transaction(struct inode *inode)
{
handle_t *result;
@@ -215,12 +215,12 @@ void ext3_delete_inode (struct inode * inode)
ext3_orphan_del(handle, inode);
EXT3_I(inode)->i_dtime = get_seconds();
/*
* One subtle ordering requirement: if anything has gone wrong
* (transaction abort, IO errors, whatever), then we can still
* do these next steps (the fs will already have been marked as
* having errors), but we can't free the inode if the mark_dirty
* fails.
*/
if (ext3_mark_inode_dirty(handle, inode))
/* If that failed, just do the required in-core inode clear. */
@@ -398,7 +398,7 @@ static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
* + if there is a block to the left of our position - allocate near it.
* + if pointer will live in indirect block - allocate near that block.
* + if pointer will live in inode - allocate in the same
* cylinder group.
*
* In the latter case we colour the starting block by the callers PID to
* prevent it from clashing with concurrent allocations for a different inode
@@ -744,7 +744,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
jbd_debug(5, "splicing indirect only\n");
BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, where->bh);
if (err)
goto err_out;
} else {
/*
@@ -1137,7 +1137,7 @@ static int walk_page_buffers( handle_t *handle,
* So what we do is to rely on the fact that journal_stop/journal_start
* will _not_ run commit under these circumstances because handle->h_ref
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
static int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
@@ -1282,7 +1282,7 @@ static int ext3_journalled_commit_write(struct file *file,
if (inode->i_size > EXT3_I(inode)->i_disksize) {
EXT3_I(inode)->i_disksize = inode->i_size;
ret2 = ext3_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
}
ret2 = ext3_journal_stop(handle);
@@ -1291,7 +1291,7 @@ static int ext3_journalled_commit_write(struct file *file,
return ret;
}
/*
* bmap() is special. It gets used by applications such as lilo and by
* the swapper to find the on-disk block of a specific piece of data.
*
@@ -1300,10 +1300,10 @@ static int ext3_journalled_commit_write(struct file *file,
* filesystem and enables swap, then they may get a nasty shock when the
* data getting swapped to that swapfile suddenly gets overwritten by
* the original zero's written out previously to the journal and
* awaiting writeback in the kernel's buffer cache.
*
* So, if we see any bmap calls here on a modified, data-journaled file,
* take extra steps to flush any blocks which might be in the cache.
*/
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
@@ -1312,16 +1312,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
int err;
if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
* only if we run lilo or swapon on a freshly made file
* do we expect this to happen.
*
* (bmap requires CAP_SYS_RAWIO so this does not
* represent an unprivileged user DOS attack --- we'd be
* in trouble if mortal users could trigger this path at
* will.)
*
* NB. EXT3_STATE_JDATA is not set on files other than
* regular files. If somebody wants to bmap a directory
@@ -1457,7 +1457,7 @@ static int ext3_ordered_writepage(struct page *page,
*/
/*
* And attach them to the current transaction. But only if
* block_write_full_page() succeeded. Otherwise they are unmapped,
* and generally junk.
*/
@@ -1644,7 +1644,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
}
}
ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext3_get_block, NULL);
@@ -2025,7 +2025,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
__le32 *first, __le32 *last)
{
ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
unsigned long count = 0; /* Number of blocks in the run */
__le32 *block_to_free_p = NULL; /* Pointer into inode/ind
corresponding to
block_to_free */
@@ -2054,7 +2054,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
} else if (nr == block_to_free + count) {
count++;
} else {
ext3_clear_blocks(handle, inode, this_bh,
block_to_free,
count, block_to_free_p, p);
block_to_free = nr;
@@ -2184,7 +2184,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
*p = 0;
BUFFER_TRACE(parent_bh,
"call ext3_journal_dirty_metadata");
ext3_journal_dirty_metadata(handle,
parent_bh);
}
}
@@ -2704,7 +2704,7 @@ void ext3_read_inode(struct inode * inode)
if (raw_inode->i_block[0])
init_special_inode(inode, inode->i_mode,
old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
else
init_special_inode(inode, inode->i_mode,
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
}
@@ -2724,8 +2724,8 @@ void ext3_read_inode(struct inode * inode)
*
* The caller must have write access to iloc->bh.
*/
static int ext3_do_update_inode(handle_t *handle,
struct inode *inode,
struct ext3_iloc *iloc)
{
struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2900,7 +2900,7 @@ int ext3_write_inode(struct inode *inode, int wait)
* commit will leave the blocks being flushed in an unused state on
* disk. (On recovery, the inode will get truncated and the blocks will
* be freed, so we have a strong guarantee that no future commit will
* leave these blocks visible to the user.)
*
* Called with inode->sem down.
*/
@@ -3043,13 +3043,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
return err;
}
/*
* On success, We end up with an outstanding reference count against
* iloc->bh. This _must_ be cleaned up later.
*/
int
ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext3_iloc *iloc)
{
int err = 0;
@@ -3139,7 +3139,7 @@ void ext3_dirty_inode(struct inode *inode)
}
#if 0
/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
* ext3_reserve_inode_write, this leaves behind no bh reference and
@@ -3157,7 +3157,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode)
BUFFER_TRACE(iloc.bh, "get_write_access");
err = journal_get_write_access(handle, iloc.bh);
if (!err)
err = ext3_journal_dirty_metadata(handle,
iloc.bh);
brelse(iloc.bh);
}
...
@@ -76,7 +76,7 @@ static struct buffer_head *ext3_append(handle_t *handle,
#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif
struct fake_dirent
@@ -169,7 +169,7 @@ static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
struct dx_frame *frames,
__u32 *start_hash);
static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
struct ext3_dir_entry_2 **res_dir, int *err);
@@ -250,7 +250,7 @@ static void dx_show_index (char * label, struct dx_entry *entries)
}
struct stats
{
unsigned names;
unsigned space;
unsigned bcount;
@@ -464,7 +464,7 @@ static void dx_release (struct dx_frame *frames)
*/
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
struct dx_frame *frames,
__u32 *start_hash)
{
struct dx_frame *p;
@@ -632,7 +632,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
}
count += ret;
hashval = ~0;
ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
frame, frames, &hashval);
*next_hash = hashval;
if (ret < 0) {
@@ -649,7 +649,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
break;
}
dx_release(frames);
dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
count, *next_hash));
return count;
errout:
@@ -1050,7 +1050,7 @@ struct dentry *ext3_get_parent(struct dentry *child)
parent = ERR_PTR(-ENOMEM);
}
return parent;
}
#define S_SHIFT 12
static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -1198,7 +1198,7 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
* add_dirent_to_buf will attempt search the directory block for
* space. It will return -ENOSPC if no space is available, and -EIO
* and -EEXIST if directory entry already exists.
*
* NOTE! bh is NOT released in the case where ENOSPC is returned. In
* all other cases bh is released.
*/
@@ -1572,7 +1572,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
* ext3_delete_entry deletes a directory entry by merging it with the
* previous entry
*/
static int ext3_delete_entry (handle_t *handle,
struct inode * dir,
struct ext3_dir_entry_2 * de_del,
struct buffer_head * bh)
@@ -1643,12 +1643,12 @@ static int ext3_add_nondir(handle_t *handle,
* is so far negative - it has no inode.
*
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
struct nameidata *nd)
{
handle_t *handle;
struct inode * inode;
int err, retries = 0;
@@ -1813,7 +1813,7 @@ static int empty_dir (struct inode * inode)
de1 = (struct ext3_dir_entry_2 *)
((char *) de + le16_to_cpu(de->rec_len));
if (le32_to_cpu(de->inode) != inode->i_ino ||
!le32_to_cpu(de1->inode) ||
strcmp (".", de->name) ||
strcmp ("..", de1->name)) {
ext3_warning (inode->i_sb, "empty_dir",
@@ -1883,7 +1883,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
* being truncated, or files being unlinked. */
/* @@@ FIXME: Observation from aviro:
* I think I can trigger J_ASSERT in ext3_orphan_add(). We block
* here (on lock_super()), so race with ext3_link() which might bump
* ->i_nlink. For, say it, character device. Not a regular file,
* not a directory, not a symlink and ->i_nlink > 0.
@@ -2393,4 +2393,4 @@ struct inode_operations ext3_special_inode_operations = {
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
};
@@ -62,13 +62,13 @@ static void ext3_unlockfs(struct super_block *sb);
static void ext3_write_super (struct super_block * sb);
static void ext3_write_super_lockfs(struct super_block *sb);
/*
* Wrappers for journal_start/end.
*
* The only special thing we need to do here is to make sure that all
* journal_end calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
*/
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -90,11 +90,11 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
return journal_start(journal, nblocks);
}
/*
* The only special thing we need to do here is to make sure that all
* journal_stop calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
*/
int __ext3_journal_stop(const char *where, handle_t *handle)
{
@@ -369,7 +369,7 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
{
struct list_head *l;
printk(KERN_ERR "sb orphan head is %d\n",
le32_to_cpu(sbi->s_es->s_last_orphan));
printk(KERN_ERR "sb_info orphan list:\n");
@@ -378,7 +378,7 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
printk(KERN_ERR " "
"inode %s:%ld at %p: mode %o, nlink %d, next %d\n",
inode->i_sb->s_id, inode->i_ino, inode,
inode->i_mode, inode->i_nlink,
NEXT_ORPHAN(inode));
}
}
@@ -475,7 +475,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
inode_init_once(&ei->vfs_inode);
}
}
static int init_inodecache(void)
{
ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
@@ -1483,7 +1483,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
(EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
printk(KERN_WARNING
"EXT3-fs warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended\n");
/*
@@ -1509,7 +1509,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (blocksize < EXT3_MIN_BLOCK_SIZE ||
blocksize > EXT3_MAX_BLOCK_SIZE) {
printk(KERN_ERR
"EXT3-fs: Unsupported filesystem blocksize %d on %s.\n",
blocksize, sb->s_id);
goto failed_mount;
@@ -1533,14 +1533,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if (!bh) {
printk(KERN_ERR
"EXT3-fs: Can't read superblock on 2nd try.\n");
goto failed_mount;
}
es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
printk (KERN_ERR
"EXT3-fs: Magic mismatch, very weird !\n");
goto failed_mount;
}
@@ -1820,7 +1820,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
/*
* Setup any per-fs journal parameters now. We'll do this both on
* initial mount, once the journal has been initialised but before we've
* done any recovery; and again on any subsequent remount.
*/
static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
{
...
/*
* linux/fs/checkpoint.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
* Copyright 1999 Red Hat Software --- All Rights Reserved
@@ -9,8 +9,8 @@
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* Checkpoint routines for the generic filesystem journaling code.
* Part of the ext2fs journaling system.
*
* Checkpointing is the process of ensuring that a section of the log is
* committed fully to disk, so that that portion of the log can be
@@ -226,7 +226,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
* Try to flush one buffer from the checkpoint list to disk.
*
* Return 1 if something happened which requires us to abort the current
* scan of the checkpoint list.
*
* Called with j_list_lock held and drops it if 1 is returned
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
@@ -270,7 +270,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
* possibly block, while still holding the journal lock.
* We cannot afford to let the transaction logic start
* messing around with this buffer before we write it to
* disk, as that would break recoverability.
*/
BUFFER_TRACE(bh, "queue");
get_bh(bh);
@@ -293,7 +293,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
* Perform an actual checkpoint. We take the first transaction on the
* list of transactions to be checkpointed and send all its buffers
* to disk. We submit larger chunks of data at once.
*
* The journal should be locked before calling this function.
*/
int log_do_checkpoint(journal_t *journal)
@@ -304,10 +304,10 @@ int log_do_checkpoint(journal_t *journal)
jbd_debug(1, "Start checkpoint\n");
/*
* First thing: if there are any transactions in the log which
* don't need checkpointing, just eliminate them from the
* journal straight away.
*/
result = cleanup_journal_tail(journal);
jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
@@ -385,9 +385,9 @@ int log_do_checkpoint(journal_t *journal)
* we have already got rid of any since the last update of the log tail
* in the journal superblock. If so, we can instantly roll the
* superblock forward to remove those transactions from the log.
*
* Return <0 on error, 0 on success, 1 if there was nothing to clean up.
*
* Called with the journal lock held.
*
* This is the only part of the journaling code which really needs to be
@@ -404,8 +404,8 @@ int cleanup_journal_tail(journal_t *journal)
unsigned long blocknr, freed;
/* OK, work out the oldest transaction remaining in the log, and
* the log block it starts at.
*
* If the log is now empty, we need to work out which is the
* next transaction ID we will write, and where it will
* start. */
@@ -558,7 +558,7 @@ int __journal_clean_checkpoint_list(journal_t *journal)
return ret;
}
/*
* journal_remove_checkpoint: called after a buffer has been committed
* to disk (either by being write-back flushed to disk, or being
* committed to the log).
@@ -636,7 +636,7 @@ int __journal_remove_checkpoint(struct journal_head *jh)
* Called with the journal locked.
* Called with j_list_lock held.
*/
void __journal_insert_checkpoint(struct journal_head *jh,
transaction_t *transaction)
{
JBUFFER_TRACE(jh, "entry");
@@ -658,7 +658,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,
/*
* We've finished with this transaction structure: adios...
*
* The transaction must have no links except for the checkpoint by this
* point.
*
...
...@@ -578,7 +578,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp) ...@@ -578,7 +578,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
* this is a no-op. If needed, we can use j_blk_offset - everything is * this is a no-op. If needed, we can use j_blk_offset - everything is
* ready. * ready.
*/ */
int journal_bmap(journal_t *journal, unsigned long blocknr, int journal_bmap(journal_t *journal, unsigned long blocknr,
unsigned long *retp) unsigned long *retp)
{ {
int err = 0; int err = 0;
...@@ -699,10 +699,10 @@ static journal_t * journal_init_common (void) ...@@ -699,10 +699,10 @@ static journal_t * journal_init_common (void)
* @len: Lenght of the journal in blocks. * @len: Lenght of the journal in blocks.
* @blocksize: blocksize of journalling device * @blocksize: blocksize of journalling device
* @returns: a newly created journal_t * * @returns: a newly created journal_t *
* *
* journal_init_dev creates a journal which maps a fixed contiguous * journal_init_dev creates a journal which maps a fixed contiguous
* range of blocks on an arbitrary block device. * range of blocks on an arbitrary block device.
* *
*/ */
journal_t * journal_init_dev(struct block_device *bdev, journal_t * journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev, struct block_device *fs_dev,
...@@ -739,11 +739,11 @@ journal_t * journal_init_dev(struct block_device *bdev, ...@@ -739,11 +739,11 @@ journal_t * journal_init_dev(struct block_device *bdev,
return journal; return journal;
} }
/** /**
* journal_t * journal_init_inode () - creates a journal which maps to a inode. * journal_t * journal_init_inode () - creates a journal which maps to a inode.
* @inode: An inode to create the journal in * @inode: An inode to create the journal in
* *
* journal_init_inode creates a journal which maps an on-disk inode as * journal_init_inode creates a journal which maps an on-disk inode as
* the journal. The inode must exist already, must support bmap() and * the journal. The inode must exist already, must support bmap() and
* must have all data blocks preallocated. * must have all data blocks preallocated.
...@@ -763,7 +763,7 @@ journal_t * journal_init_inode (struct inode *inode) ...@@ -763,7 +763,7 @@ journal_t * journal_init_inode (struct inode *inode)
journal->j_inode = inode; journal->j_inode = inode;
jbd_debug(1, jbd_debug(1,
"journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n", "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
journal, inode->i_sb->s_id, inode->i_ino, journal, inode->i_sb->s_id, inode->i_ino,
(long long) inode->i_size, (long long) inode->i_size,
inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
...@@ -798,10 +798,10 @@ journal_t * journal_init_inode (struct inode *inode) ...@@ -798,10 +798,10 @@ journal_t * journal_init_inode (struct inode *inode)
return journal; return journal;
} }
/* /*
* If the journal init or create aborts, we need to mark the journal * If the journal init or create aborts, we need to mark the journal
* superblock as being NULL to prevent the journal destroy from writing * superblock as being NULL to prevent the journal destroy from writing
* back a bogus superblock. * back a bogus superblock.
*/ */
static void journal_fail_superblock (journal_t *journal) static void journal_fail_superblock (journal_t *journal)
{ {
...@@ -844,13 +844,13 @@ static int journal_reset(journal_t *journal) ...@@ -844,13 +844,13 @@ static int journal_reset(journal_t *journal)
return 0; return 0;
} }
/** /**
* int journal_create() - Initialise the new journal file * int journal_create() - Initialise the new journal file
* @journal: Journal to create. This structure must have been initialised * @journal: Journal to create. This structure must have been initialised
* *
* Given a journal_t structure which tells us which disk blocks we can * Given a journal_t structure which tells us which disk blocks we can
* use, create a new journal superblock and initialise all of the * use, create a new journal superblock and initialise all of the
* journal fields from scratch. * journal fields from scratch.
**/ **/
int journal_create(journal_t *journal) int journal_create(journal_t *journal)
{ {
...@@ -915,7 +915,7 @@ int journal_create(journal_t *journal) ...@@ -915,7 +915,7 @@ int journal_create(journal_t *journal)
return journal_reset(journal); return journal_reset(journal);
} }
/** /**
* void journal_update_superblock() - Update journal sb on disk. * void journal_update_superblock() - Update journal sb on disk.
* @journal: The journal to update. * @journal: The journal to update.
* @wait: Set to '0' if you don't want to wait for IO completion. * @wait: Set to '0' if you don't want to wait for IO completion.
...@@ -939,7 +939,7 @@ void journal_update_superblock(journal_t *journal, int wait) ...@@ -939,7 +939,7 @@ void journal_update_superblock(journal_t *journal, int wait)
journal->j_transaction_sequence) { journal->j_transaction_sequence) {
jbd_debug(1,"JBD: Skipping superblock update on recovered sb " jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
"(start %ld, seq %d, errno %d)\n", "(start %ld, seq %d, errno %d)\n",
journal->j_tail, journal->j_tail_sequence, journal->j_tail, journal->j_tail_sequence,
journal->j_errno); journal->j_errno);
goto out; goto out;
} }
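A minimal illustration of the wait flag described above; the wrapper is hypothetical.

    /* Sketch only: push the in-memory journal superblock out to disk.
     * wait == 1 blocks until the write completes; 0 merely submits it. */
    static void example_sync_journal_sb(journal_t *journal)
    {
            journal_update_superblock(journal, 1);
    }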
...@@ -1062,7 +1062,7 @@ static int load_superblock(journal_t *journal) ...@@ -1062,7 +1062,7 @@ static int load_superblock(journal_t *journal)
/** /**
* int journal_load() - Read journal from disk. * int journal_load() - Read journal from disk.
* @journal: Journal to act on. * @journal: Journal to act on.
* *
* Given a journal_t structure which tells us which disk blocks contain * Given a journal_t structure which tells us which disk blocks contain
* a journal, read the journal from disk to initialise the in-memory * a journal, read the journal from disk to initialise the in-memory
* structures. * structures.
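For the mount path, a hedged sketch of loading an existing journal; the error handling mirrors what an ext3-style caller might do and is an assumption, not code from this patch.

    /* Sketch only: read the on-disk journal and build the in-memory state. */
    static int example_load_existing_journal(journal_t *journal)
    {
            int err;

            err = journal_load(journal);
            if (err) {
                    printk(KERN_ERR "example: journal_load failed (%d)\n", err);
                    journal_destroy(journal);
            }
            return err;
    }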
...@@ -1172,9 +1172,9 @@ void journal_destroy(journal_t *journal) ...@@ -1172,9 +1172,9 @@ void journal_destroy(journal_t *journal)
* @compat: bitmask of compatible features * @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount * @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features * @incompat: bitmask of incompatible features
* *
* Check whether the journal uses all of a given set of * Check whether the journal uses all of a given set of
* features. Return true (non-zero) if it does. * features. Return true (non-zero) if it does.
**/ **/
int journal_check_used_features (journal_t *journal, unsigned long compat, int journal_check_used_features (journal_t *journal, unsigned long compat,
...@@ -1203,7 +1203,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat, ...@@ -1203,7 +1203,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
* @compat: bitmask of compatible features * @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount * @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features * @incompat: bitmask of incompatible features
* *
* Check whether the journaling code supports the use of * Check whether the journaling code supports the use of
* all of a given set of features on this journal. Return true * all of a given set of features on this journal. Return true
* (non-zero) if it can. */ * (non-zero) if it can. */
...@@ -1241,7 +1241,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat, ...@@ -1241,7 +1241,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
* @incompat: bitmask of incompatible features * @incompat: bitmask of incompatible features
* *
* Mark a given journal feature as present on the * Mark a given journal feature as present on the
* superblock. Returns true if the requested features could be set. * superblock. Returns true if the requested features could be set.
* *
*/ */
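The three feature calls are typically used together; a hedged sketch follows, using JFS_FEATURE_INCOMPAT_REVOKE purely as an illustrative feature bit.

    /* Sketch only: enable a feature if the journal format allows it. */
    static int example_want_revoke_feature(journal_t *journal)
    {
            /* Already recorded on the superblock: nothing to do. */
            if (journal_check_used_features(journal, 0, 0,
                                            JFS_FEATURE_INCOMPAT_REVOKE))
                    return 1;

            /* Not supported by this journal format at all? */
            if (!journal_check_available_features(journal, 0, 0,
                                                  JFS_FEATURE_INCOMPAT_REVOKE))
                    return 0;

            /* Mark it as present; true if the bits could be set. */
            return journal_set_features(journal, 0, 0,
                                        JFS_FEATURE_INCOMPAT_REVOKE);
    }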
...@@ -1327,7 +1327,7 @@ static int journal_convert_superblock_v1(journal_t *journal, ...@@ -1327,7 +1327,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
/** /**
* int journal_flush () - Flush journal * int journal_flush () - Flush journal
* @journal: Journal to act on. * @journal: Journal to act on.
* *
* Flush all data for a given journal to disk and empty the journal. * Flush all data for a given journal to disk and empty the journal.
* Filesystems can use this when remounting readonly to ensure that * Filesystems can use this when remounting readonly to ensure that
* recovery does not need to happen on remount. * recovery does not need to happen on remount.
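A remount-readonly caller, as the comment suggests, might use it like this (sketch; the wrapper is hypothetical):

    /* Sketch only: empty the journal so the next mount needs no recovery. */
    static int example_quiesce_for_ro_remount(journal_t *journal)
    {
            int err = journal_flush(journal);

            if (err)
                    printk(KERN_ERR "example: journal_flush failed (%d)\n", err);
            return err;
    }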
...@@ -1394,7 +1394,7 @@ int journal_flush(journal_t *journal) ...@@ -1394,7 +1394,7 @@ int journal_flush(journal_t *journal)
* int journal_wipe() - Wipe journal contents * int journal_wipe() - Wipe journal contents
* @journal: Journal to act on. * @journal: Journal to act on.
* @write: flag (see below) * @write: flag (see below)
* *
* Wipe out all of the contents of a journal, safely. This will produce * Wipe out all of the contents of a journal, safely. This will produce
* a warning if the journal contains any valid recovery information. * a warning if the journal contains any valid recovery information.
* Must be called between journal_init_*() and journal_load(). * Must be called between journal_init_*() and journal_load().
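A hedged sketch of the ordering constraint stated above (between journal_init_*() and journal_load()); the surrounding mount logic is assumed.

    /* Sketch only: deliberately discard any old log instead of replaying it. */
    static int example_skip_replay(journal_t *journal)
    {
            return journal_wipe(journal, 0);        /* 'write' flag passed straight through */
    }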
...@@ -1449,7 +1449,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer) ...@@ -1449,7 +1449,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
/* /*
* Journal abort has very specific semantics, which we describe * Journal abort has very specific semantics, which we describe
 * below. * below.
* *
 * Two internal functions, which provide abort to the jbd layer * Two internal functions, which provide abort to the jbd layer
* itself are here. * itself are here.
...@@ -1504,7 +1504,7 @@ static void __journal_abort_soft (journal_t *journal, int errno) ...@@ -1504,7 +1504,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* Perform a complete, immediate shutdown of the ENTIRE * Perform a complete, immediate shutdown of the ENTIRE
* journal (not of a single transaction). This operation cannot be * journal (not of a single transaction). This operation cannot be
* undone without closing and reopening the journal. * undone without closing and reopening the journal.
* *
* The journal_abort function is intended to support higher level error * The journal_abort function is intended to support higher level error
* recovery mechanisms such as the ext2/ext3 remount-readonly error * recovery mechanisms such as the ext2/ext3 remount-readonly error
* mode. * mode.
...@@ -1538,7 +1538,7 @@ static void __journal_abort_soft (journal_t *journal, int errno) ...@@ -1538,7 +1538,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* supply an errno; a null errno implies that absolutely no further * supply an errno; a null errno implies that absolutely no further
* writes are done to the journal (unless there are any already in * writes are done to the journal (unless there are any already in
* progress). * progress).
* *
*/ */
void journal_abort(journal_t *journal, int errno) void journal_abort(journal_t *journal, int errno)
...@@ -1546,7 +1546,7 @@ void journal_abort(journal_t *journal, int errno) ...@@ -1546,7 +1546,7 @@ void journal_abort(journal_t *journal, int errno)
__journal_abort_soft(journal, errno); __journal_abort_soft(journal, errno);
} }
/** /**
* int journal_errno () - returns the journal's error state. * int journal_errno () - returns the journal's error state.
* @journal: journal to examine. * @journal: journal to examine.
* *
...@@ -1570,7 +1570,7 @@ int journal_errno(journal_t *journal) ...@@ -1570,7 +1570,7 @@ int journal_errno(journal_t *journal)
return err; return err;
} }
/** /**
* int journal_clear_err () - clears the journal's error state * int journal_clear_err () - clears the journal's error state
* @journal: journal to act on. * @journal: journal to act on.
* *
...@@ -1590,7 +1590,7 @@ int journal_clear_err(journal_t *journal) ...@@ -1590,7 +1590,7 @@ int journal_clear_err(journal_t *journal)
return err; return err;
} }
/** /**
* void journal_ack_err() - Ack journal err. * void journal_ack_err() - Ack journal err.
* @journal: journal to act on. * @journal: journal to act on.
* *
...@@ -1612,7 +1612,7 @@ int journal_blocks_per_page(struct inode *inode) ...@@ -1612,7 +1612,7 @@ int journal_blocks_per_page(struct inode *inode)
/* /*
 * Simple support for retrying memory allocations. Introduced to help * Simple support for retrying memory allocations. Introduced to help
* debug different VM deadlock avoidance strategies. * debug different VM deadlock avoidance strategies.
*/ */
void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
{ {
......
/* /*
* linux/fs/recovery.c * linux/fs/recovery.c
* *
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
* *
* Copyright 1999-2000 Red Hat Software --- All Rights Reserved * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
* option, any later version, incorporated herein by reference. * option, any later version, incorporated herein by reference.
* *
* Journal recovery routines for the generic filesystem journaling code; * Journal recovery routines for the generic filesystem journaling code;
* part of the ext2fs journaling system. * part of the ext2fs journaling system.
*/ */
#ifndef __KERNEL__ #ifndef __KERNEL__
...@@ -25,9 +25,9 @@ ...@@ -25,9 +25,9 @@
/* /*
* Maintain information about the progress of the recovery job, so that * Maintain information about the progress of the recovery job, so that
* the different passes can carry information between them. * the different passes can carry information between them.
*/ */
struct recovery_info struct recovery_info
{ {
tid_t start_transaction; tid_t start_transaction;
tid_t end_transaction; tid_t end_transaction;
...@@ -116,7 +116,7 @@ static int do_readahead(journal_t *journal, unsigned int start) ...@@ -116,7 +116,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
err = 0; err = 0;
failed: failed:
if (nbufs) if (nbufs)
journal_brelse_array(bufs, nbufs); journal_brelse_array(bufs, nbufs);
return err; return err;
} }
...@@ -128,7 +128,7 @@ static int do_readahead(journal_t *journal, unsigned int start) ...@@ -128,7 +128,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
* Read a block from the journal * Read a block from the journal
*/ */
static int jread(struct buffer_head **bhp, journal_t *journal, static int jread(struct buffer_head **bhp, journal_t *journal,
unsigned int offset) unsigned int offset)
{ {
int err; int err;
...@@ -212,14 +212,14 @@ do { \ ...@@ -212,14 +212,14 @@ do { \
/** /**
 * journal_recover - recovers an on-disk journal * journal_recover - recovers an on-disk journal
* @journal: the journal to recover * @journal: the journal to recover
* *
* The primary function for recovering the log contents when mounting a * The primary function for recovering the log contents when mounting a
* journaled device. * journaled device.
* *
* Recovery is done in three passes. In the first pass, we look for the * Recovery is done in three passes. In the first pass, we look for the
* end of the log. In the second, we assemble the list of revoke * end of the log. In the second, we assemble the list of revoke
* blocks. In the third and final pass, we replay any un-revoked blocks * blocks. In the third and final pass, we replay any un-revoked blocks
* in the log. * in the log.
*/ */
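The three passes described above can be outlined as follows. This is a paraphrase for the reader, not a quotation of journal_recover(); the do_one_pass() helper and the PASS_* names follow the recovery code's own structure.

    /* Outline only */
    err = do_one_pass(journal, &info, PASS_SCAN);           /* pass 1: find end of log */
    if (!err)
            err = do_one_pass(journal, &info, PASS_REVOKE); /* pass 2: collect revoke records */
    if (!err)
            err = do_one_pass(journal, &info, PASS_REPLAY); /* pass 3: replay un-revoked blocks */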
int journal_recover(journal_t *journal) int journal_recover(journal_t *journal)
{ {
...@@ -231,10 +231,10 @@ int journal_recover(journal_t *journal) ...@@ -231,10 +231,10 @@ int journal_recover(journal_t *journal)
memset(&info, 0, sizeof(info)); memset(&info, 0, sizeof(info));
sb = journal->j_superblock; sb = journal->j_superblock;
/* /*
* The journal superblock's s_start field (the current log head) * The journal superblock's s_start field (the current log head)
* is always zero if, and only if, the journal was cleanly * is always zero if, and only if, the journal was cleanly
* unmounted. * unmounted.
*/ */
if (!sb->s_start) { if (!sb->s_start) {
...@@ -253,7 +253,7 @@ int journal_recover(journal_t *journal) ...@@ -253,7 +253,7 @@ int journal_recover(journal_t *journal)
jbd_debug(0, "JBD: recovery, exit status %d, " jbd_debug(0, "JBD: recovery, exit status %d, "
"recovered transactions %u to %u\n", "recovered transactions %u to %u\n",
err, info.start_transaction, info.end_transaction); err, info.start_transaction, info.end_transaction);
jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n", jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
info.nr_replays, info.nr_revoke_hits, info.nr_revokes); info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
/* Restart the log at the next transaction ID, thus invalidating /* Restart the log at the next transaction ID, thus invalidating
...@@ -268,15 +268,15 @@ int journal_recover(journal_t *journal) ...@@ -268,15 +268,15 @@ int journal_recover(journal_t *journal)
/** /**
 * journal_skip_recovery - Start journal and wipe existing records * journal_skip_recovery - Start journal and wipe existing records
* @journal: journal to startup * @journal: journal to startup
* *
* Locate any valid recovery information from the journal and set up the * Locate any valid recovery information from the journal and set up the
* journal structures in memory to ignore it (presumably because the * journal structures in memory to ignore it (presumably because the
* caller has evidence that it is out of date). * caller has evidence that it is out of date).
 * This function doesn't appear to be exported. * This function doesn't appear to be exported.
* *
* We perform one pass over the journal to allow us to tell the user how * We perform one pass over the journal to allow us to tell the user how
* much recovery information is being erased, and to let us initialise * much recovery information is being erased, and to let us initialise
* the journal transaction sequence numbers to the next unused ID. * the journal transaction sequence numbers to the next unused ID.
*/ */
int journal_skip_recovery(journal_t *journal) int journal_skip_recovery(journal_t *journal)
{ {
...@@ -297,7 +297,7 @@ int journal_skip_recovery(journal_t *journal) ...@@ -297,7 +297,7 @@ int journal_skip_recovery(journal_t *journal)
#ifdef CONFIG_JBD_DEBUG #ifdef CONFIG_JBD_DEBUG
int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence); int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
#endif #endif
jbd_debug(0, jbd_debug(0,
"JBD: ignoring %d transaction%s from the journal.\n", "JBD: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s"); dropped, (dropped == 1) ? "" : "s");
journal->j_transaction_sequence = ++info.end_transaction; journal->j_transaction_sequence = ++info.end_transaction;
...@@ -324,10 +324,10 @@ static int do_one_pass(journal_t *journal, ...@@ -324,10 +324,10 @@ static int do_one_pass(journal_t *journal,
MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t)) MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
/ sizeof(journal_block_tag_t)); / sizeof(journal_block_tag_t));
/* /*
* First thing is to establish what we expect to find in the log * First thing is to establish what we expect to find in the log
* (in terms of transaction IDs), and where (in terms of log * (in terms of transaction IDs), and where (in terms of log
* block offsets): query the superblock. * block offsets): query the superblock.
*/ */
sb = journal->j_superblock; sb = journal->j_superblock;
...@@ -344,7 +344,7 @@ static int do_one_pass(journal_t *journal, ...@@ -344,7 +344,7 @@ static int do_one_pass(journal_t *journal,
* Now we walk through the log, transaction by transaction, * Now we walk through the log, transaction by transaction,
* making sure that each transaction has a commit block in the * making sure that each transaction has a commit block in the
* expected place. Each complete transaction gets replayed back * expected place. Each complete transaction gets replayed back
* into the main filesystem. * into the main filesystem.
*/ */
while (1) { while (1) {
...@@ -379,8 +379,8 @@ static int do_one_pass(journal_t *journal, ...@@ -379,8 +379,8 @@ static int do_one_pass(journal_t *journal,
next_log_block++; next_log_block++;
wrap(journal, next_log_block); wrap(journal, next_log_block);
/* What kind of buffer is it? /* What kind of buffer is it?
* *
* If it is a descriptor block, check that it has the * If it is a descriptor block, check that it has the
* expected sequence number. Otherwise, we're all done * expected sequence number. Otherwise, we're all done
* here. */ * here. */
...@@ -394,7 +394,7 @@ static int do_one_pass(journal_t *journal, ...@@ -394,7 +394,7 @@ static int do_one_pass(journal_t *journal,
blocktype = be32_to_cpu(tmp->h_blocktype); blocktype = be32_to_cpu(tmp->h_blocktype);
sequence = be32_to_cpu(tmp->h_sequence); sequence = be32_to_cpu(tmp->h_sequence);
jbd_debug(3, "Found magic %d, sequence %d\n", jbd_debug(3, "Found magic %d, sequence %d\n",
blocktype, sequence); blocktype, sequence);
if (sequence != next_commit_ID) { if (sequence != next_commit_ID) {
...@@ -438,7 +438,7 @@ static int do_one_pass(journal_t *journal, ...@@ -438,7 +438,7 @@ static int do_one_pass(journal_t *journal,
/* Recover what we can, but /* Recover what we can, but
* report failure at the end. */ * report failure at the end. */
success = err; success = err;
printk (KERN_ERR printk (KERN_ERR
"JBD: IO error %d recovering " "JBD: IO error %d recovering "
"block %ld in log\n", "block %ld in log\n",
err, io_block); err, io_block);
...@@ -452,7 +452,7 @@ static int do_one_pass(journal_t *journal, ...@@ -452,7 +452,7 @@ static int do_one_pass(journal_t *journal,
* revoked, then we're all done * revoked, then we're all done
* here. */ * here. */
if (journal_test_revoke if (journal_test_revoke
(journal, blocknr, (journal, blocknr,
next_commit_ID)) { next_commit_ID)) {
brelse(obh); brelse(obh);
++info->nr_revoke_hits; ++info->nr_revoke_hits;
...@@ -465,7 +465,7 @@ static int do_one_pass(journal_t *journal, ...@@ -465,7 +465,7 @@ static int do_one_pass(journal_t *journal,
blocknr, blocknr,
journal->j_blocksize); journal->j_blocksize);
if (nbh == NULL) { if (nbh == NULL) {
printk(KERN_ERR printk(KERN_ERR
"JBD: Out of memory " "JBD: Out of memory "
"during recovery.\n"); "during recovery.\n");
err = -ENOMEM; err = -ENOMEM;
...@@ -537,7 +537,7 @@ static int do_one_pass(journal_t *journal, ...@@ -537,7 +537,7 @@ static int do_one_pass(journal_t *journal,
} }
done: done:
/* /*
* We broke out of the log scan loop: either we came to the * We broke out of the log scan loop: either we came to the
* known end of the log or we found an unexpected block in the * known end of the log or we found an unexpected block in the
* log. If the latter happened, then we know that the "current" * log. If the latter happened, then we know that the "current"
...@@ -567,7 +567,7 @@ static int do_one_pass(journal_t *journal, ...@@ -567,7 +567,7 @@ static int do_one_pass(journal_t *journal,
/* Scan a revoke record, marking all blocks mentioned as revoked. */ /* Scan a revoke record, marking all blocks mentioned as revoked. */
static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
tid_t sequence, struct recovery_info *info) tid_t sequence, struct recovery_info *info)
{ {
journal_revoke_header_t *header; journal_revoke_header_t *header;
......
/* /*
* linux/fs/revoke.c * linux/fs/revoke.c
* *
* Written by Stephen C. Tweedie <sct@redhat.com>, 2000 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
* *
* Copyright 2000 Red Hat corp --- All Rights Reserved * Copyright 2000 Red Hat corp --- All Rights Reserved
...@@ -15,10 +15,10 @@ ...@@ -15,10 +15,10 @@
* Revoke is the mechanism used to prevent old log records for deleted * Revoke is the mechanism used to prevent old log records for deleted
* metadata from being replayed on top of newer data using the same * metadata from being replayed on top of newer data using the same
* blocks. The revoke mechanism is used in two separate places: * blocks. The revoke mechanism is used in two separate places:
* *
* + Commit: during commit we write the entire list of the current * + Commit: during commit we write the entire list of the current
* transaction's revoked blocks to the journal * transaction's revoked blocks to the journal
* *
* + Recovery: during recovery we record the transaction ID of all * + Recovery: during recovery we record the transaction ID of all
* revoked blocks. If there are multiple revoke records in the log * revoked blocks. If there are multiple revoke records in the log
* for a single block, only the last one counts, and if there is a log * for a single block, only the last one counts, and if there is a log
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
* single transaction: * single transaction:
* *
* Block is revoked and then journaled: * Block is revoked and then journaled:
* The desired end result is the journaling of the new block, so we * The desired end result is the journaling of the new block, so we
* cancel the revoke before the transaction commits. * cancel the revoke before the transaction commits.
* *
* Block is journaled and then revoked: * Block is journaled and then revoked:
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
* transaction must have happened after the block was journaled and so * transaction must have happened after the block was journaled and so
* the revoke must take precedence. * the revoke must take precedence.
* *
* Block is revoked and then written as data: * Block is revoked and then written as data:
* The data write is allowed to succeed, but the revoke is _not_ * The data write is allowed to succeed, but the revoke is _not_
* cancelled. We still need to prevent old log records from * cancelled. We still need to prevent old log records from
* overwriting the new data. We don't even need to clear the revoke * overwriting the new data. We don't even need to clear the revoke
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
* buffer has not been revoked, and cancel_revoke * buffer has not been revoked, and cancel_revoke
* need do nothing. * need do nothing.
* RevokeValid set, Revoked set: * RevokeValid set, Revoked set:
* buffer has been revoked. * buffer has been revoked.
*/ */
#ifndef __KERNEL__ #ifndef __KERNEL__
...@@ -77,7 +77,7 @@ static kmem_cache_t *revoke_table_cache; ...@@ -77,7 +77,7 @@ static kmem_cache_t *revoke_table_cache;
journal replay, this involves recording the transaction ID of the journal replay, this involves recording the transaction ID of the
last transaction to revoke this block. */ last transaction to revoke this block. */
struct jbd_revoke_record_s struct jbd_revoke_record_s
{ {
struct list_head hash; struct list_head hash;
tid_t sequence; /* Used for recovery only */ tid_t sequence; /* Used for recovery only */
...@@ -90,8 +90,8 @@ struct jbd_revoke_table_s ...@@ -90,8 +90,8 @@ struct jbd_revoke_table_s
{ {
/* It is conceivable that we might want a larger hash table /* It is conceivable that we might want a larger hash table
* for recovery. Must be a power of two. */ * for recovery. Must be a power of two. */
int hash_size; int hash_size;
int hash_shift; int hash_shift;
struct list_head *hash_table; struct list_head *hash_table;
}; };
...@@ -301,22 +301,22 @@ void journal_destroy_revoke(journal_t *journal) ...@@ -301,22 +301,22 @@ void journal_destroy_revoke(journal_t *journal)
#ifdef __KERNEL__ #ifdef __KERNEL__
/* /*
* journal_revoke: revoke a given buffer_head from the journal. This * journal_revoke: revoke a given buffer_head from the journal. This
* prevents the block from being replayed during recovery if we take a * prevents the block from being replayed during recovery if we take a
* crash after this current transaction commits. Any subsequent * crash after this current transaction commits. Any subsequent
* metadata writes of the buffer in this transaction cancel the * metadata writes of the buffer in this transaction cancel the
* revoke. * revoke.
* *
* Note that this call may block --- it is up to the caller to make * Note that this call may block --- it is up to the caller to make
* sure that there are no further calls to journal_write_metadata * sure that there are no further calls to journal_write_metadata
* before the revoke is complete. In ext3, this implies calling the * before the revoke is complete. In ext3, this implies calling the
* revoke before clearing the block bitmap when we are deleting * revoke before clearing the block bitmap when we are deleting
* metadata. * metadata.
* *
* Revoke performs a journal_forget on any buffer_head passed in as a * Revoke performs a journal_forget on any buffer_head passed in as a
* parameter, but does _not_ forget the buffer_head if the bh was only * parameter, but does _not_ forget the buffer_head if the bh was only
* found implicitly. * found implicitly.
* *
* bh_in may not be a journalled buffer - it may have come off * bh_in may not be a journalled buffer - it may have come off
* the hash tables without an attached journal_head. * the hash tables without an attached journal_head.
...@@ -325,7 +325,7 @@ void journal_destroy_revoke(journal_t *journal) ...@@ -325,7 +325,7 @@ void journal_destroy_revoke(journal_t *journal)
* by one. * by one.
*/ */
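A hedged sketch of the caller's side: when a metadata block is freed inside a transaction, it is revoked before the bitmap is cleared, exactly as the comment requires. The wrapper and the surrounding delete path are assumptions.

    /* Sketch only: free a metadata block safely with respect to replay. */
    static int example_free_metadata_block(handle_t *handle,
                                           unsigned long blocknr,
                                           struct buffer_head *bh)
    {
            int err;

            err = journal_revoke(handle, blocknr, bh);      /* before the bitmap update */
            if (err)
                    return err;

            /* ...caller now clears the block in the allocation bitmap... */
            return 0;
    }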
int journal_revoke(handle_t *handle, unsigned long blocknr, int journal_revoke(handle_t *handle, unsigned long blocknr,
struct buffer_head *bh_in) struct buffer_head *bh_in)
{ {
struct buffer_head *bh = NULL; struct buffer_head *bh = NULL;
...@@ -487,7 +487,7 @@ void journal_switch_revoke_table(journal_t *journal) ...@@ -487,7 +487,7 @@ void journal_switch_revoke_table(journal_t *journal)
else else
journal->j_revoke = journal->j_revoke_table[0]; journal->j_revoke = journal->j_revoke_table[0];
for (i = 0; i < journal->j_revoke->hash_size; i++) for (i = 0; i < journal->j_revoke->hash_size; i++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]); INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
} }
...@@ -498,7 +498,7 @@ void journal_switch_revoke_table(journal_t *journal) ...@@ -498,7 +498,7 @@ void journal_switch_revoke_table(journal_t *journal)
* Called with the journal lock held. * Called with the journal lock held.
*/ */
void journal_write_revoke_records(journal_t *journal, void journal_write_revoke_records(journal_t *journal,
transaction_t *transaction) transaction_t *transaction)
{ {
struct journal_head *descriptor; struct journal_head *descriptor;
...@@ -507,7 +507,7 @@ void journal_write_revoke_records(journal_t *journal, ...@@ -507,7 +507,7 @@ void journal_write_revoke_records(journal_t *journal,
struct list_head *hash_list; struct list_head *hash_list;
int i, offset, count; int i, offset, count;
descriptor = NULL; descriptor = NULL;
offset = 0; offset = 0;
count = 0; count = 0;
...@@ -519,10 +519,10 @@ void journal_write_revoke_records(journal_t *journal, ...@@ -519,10 +519,10 @@ void journal_write_revoke_records(journal_t *journal,
hash_list = &revoke->hash_table[i]; hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) { while (!list_empty(hash_list)) {
record = (struct jbd_revoke_record_s *) record = (struct jbd_revoke_record_s *)
hash_list->next; hash_list->next;
write_one_revoke_record(journal, transaction, write_one_revoke_record(journal, transaction,
&descriptor, &offset, &descriptor, &offset,
record); record);
count++; count++;
list_del(&record->hash); list_del(&record->hash);
...@@ -534,14 +534,14 @@ void journal_write_revoke_records(journal_t *journal, ...@@ -534,14 +534,14 @@ void journal_write_revoke_records(journal_t *journal,
jbd_debug(1, "Wrote %d revoke records\n", count); jbd_debug(1, "Wrote %d revoke records\n", count);
} }
/* /*
* Write out one revoke record. We need to create a new descriptor * Write out one revoke record. We need to create a new descriptor
* block if the old one is full or if we have not already created one. * block if the old one is full or if we have not already created one.
*/ */
static void write_one_revoke_record(journal_t *journal, static void write_one_revoke_record(journal_t *journal,
transaction_t *transaction, transaction_t *transaction,
struct journal_head **descriptorp, struct journal_head **descriptorp,
int *offsetp, int *offsetp,
struct jbd_revoke_record_s *record) struct jbd_revoke_record_s *record)
{ {
...@@ -584,21 +584,21 @@ static void write_one_revoke_record(journal_t *journal, ...@@ -584,21 +584,21 @@ static void write_one_revoke_record(journal_t *journal,
*descriptorp = descriptor; *descriptorp = descriptor;
} }
* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) = * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
cpu_to_be32(record->blocknr); cpu_to_be32(record->blocknr);
offset += 4; offset += 4;
*offsetp = offset; *offsetp = offset;
} }
/* /*
* Flush a revoke descriptor out to the journal. If we are aborting, * Flush a revoke descriptor out to the journal. If we are aborting,
* this is a noop; otherwise we are generating a buffer which needs to * this is a noop; otherwise we are generating a buffer which needs to
* be waited for during commit, so it has to go onto the appropriate * be waited for during commit, so it has to go onto the appropriate
* journal buffer list. * journal buffer list.
*/ */
static void flush_descriptor(journal_t *journal, static void flush_descriptor(journal_t *journal,
struct journal_head *descriptor, struct journal_head *descriptor,
int offset) int offset)
{ {
journal_revoke_header_t *header; journal_revoke_header_t *header;
...@@ -618,7 +618,7 @@ static void flush_descriptor(journal_t *journal, ...@@ -618,7 +618,7 @@ static void flush_descriptor(journal_t *journal,
} }
#endif #endif
/* /*
* Revoke support for recovery. * Revoke support for recovery.
* *
* Recovery needs to be able to: * Recovery needs to be able to:
...@@ -629,7 +629,7 @@ static void flush_descriptor(journal_t *journal, ...@@ -629,7 +629,7 @@ static void flush_descriptor(journal_t *journal,
* check whether a given block in a given transaction should be replayed * check whether a given block in a given transaction should be replayed
* (ie. has not been revoked by a revoke record in that or a subsequent * (ie. has not been revoked by a revoke record in that or a subsequent
* transaction) * transaction)
* *
* empty the revoke table after recovery. * empty the revoke table after recovery.
*/ */
...@@ -637,11 +637,11 @@ static void flush_descriptor(journal_t *journal, ...@@ -637,11 +637,11 @@ static void flush_descriptor(journal_t *journal,
* First, setting revoke records. We create a new revoke record for * First, setting revoke records. We create a new revoke record for
* every block ever revoked in the log as we scan it for recovery, and * every block ever revoked in the log as we scan it for recovery, and
* we update the existing records if we find multiple revokes for a * we update the existing records if we find multiple revokes for a
* single block. * single block.
*/ */
int journal_set_revoke(journal_t *journal, int journal_set_revoke(journal_t *journal,
unsigned long blocknr, unsigned long blocknr,
tid_t sequence) tid_t sequence)
{ {
struct jbd_revoke_record_s *record; struct jbd_revoke_record_s *record;
...@@ -653,18 +653,18 @@ int journal_set_revoke(journal_t *journal, ...@@ -653,18 +653,18 @@ int journal_set_revoke(journal_t *journal,
if (tid_gt(sequence, record->sequence)) if (tid_gt(sequence, record->sequence))
record->sequence = sequence; record->sequence = sequence;
return 0; return 0;
} }
return insert_revoke_hash(journal, blocknr, sequence); return insert_revoke_hash(journal, blocknr, sequence);
} }
/* /*
* Test revoke records. For a given block referenced in the log, has * Test revoke records. For a given block referenced in the log, has
* that block been revoked? A revoke record with a given transaction * that block been revoked? A revoke record with a given transaction
* sequence number revokes all blocks in that transaction and earlier * sequence number revokes all blocks in that transaction and earlier
 * ones, but later transactions still need to be replayed. * ones, but later transactions still need to be replayed.
*/ */
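How the two recovery-side calls cooperate can be sketched as below; the pass structure is simplified from the recovery description earlier, and the wrapper is hypothetical.

    /* Sketch only: record a revoke during the revoke pass, honour it
     * during the replay pass. */
    static int example_should_replay(journal_t *journal,
                                     unsigned long blocknr,
                                     tid_t sequence)
    {
            /* Pass 2: remember that this transaction revoked the block. */
            journal_set_revoke(journal, blocknr, sequence);

            /* Pass 3: blocks from this or earlier transactions are skipped;
             * blocks from later transactions are still replayed. */
            return !journal_test_revoke(journal, blocknr, sequence);
    }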
int journal_test_revoke(journal_t *journal, int journal_test_revoke(journal_t *journal,
unsigned long blocknr, unsigned long blocknr,
tid_t sequence) tid_t sequence)
{ {
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
/* Define the number of blocks we need to account to a transaction to /* Define the number of blocks we need to account to a transaction to
* modify one block of data. * modify one block of data.
* *
* We may have to touch one inode, one bitmap buffer, up to three * We may have to touch one inode, one bitmap buffer, up to three
* indirection blocks, the group and superblock summaries, and the data * indirection blocks, the group and superblock summaries, and the data
* block to complete the transaction. */ * block to complete the transaction. */
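Tallying the worst case enumerated in that comment (an illustrative count derived from the comment itself, not quoted from the header):

    /*
     *    1  inode block
     *  + 1  block bitmap
     *  + 3  indirection blocks (worst case)
     *  + 1  group descriptor summary
     *  + 1  superblock summary
     *  + 1  data block
     *  ---
     *    8  blocks charged against the transaction per modified data block
     */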
...@@ -88,16 +88,16 @@ ...@@ -88,16 +88,16 @@
#endif #endif
int int
ext3_mark_iloc_dirty(handle_t *handle, ext3_mark_iloc_dirty(handle_t *handle,
struct inode *inode, struct inode *inode,
struct ext3_iloc *iloc); struct ext3_iloc *iloc);
/* /*
 * On success, we end up with an outstanding reference count against * On success, we end up with an outstanding reference count against
* iloc->bh. This _must_ be cleaned up later. * iloc->bh. This _must_ be cleaned up later.
*/ */
int ext3_reserve_inode_write(handle_t *handle, struct inode *inode, int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext3_iloc *iloc); struct ext3_iloc *iloc);
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode); int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
......
/* /*
* linux/include/linux/jbd.h * linux/include/linux/jbd.h
* *
* Written by Stephen C. Tweedie <sct@redhat.com> * Written by Stephen C. Tweedie <sct@redhat.com>
* *
* Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
...@@ -97,8 +97,8 @@ extern void jbd_slab_free(void *ptr, size_t size); ...@@ -97,8 +97,8 @@ extern void jbd_slab_free(void *ptr, size_t size);
* number of outstanding buffers possible at any time. When the * number of outstanding buffers possible at any time. When the
* operation completes, any buffer credits not used are credited back to * operation completes, any buffer credits not used are credited back to
* the transaction, so that at all times we know how many buffers the * the transaction, so that at all times we know how many buffers the
* outstanding updates on a transaction might possibly touch. * outstanding updates on a transaction might possibly touch.
* *
* This is an opaque datatype. * This is an opaque datatype.
**/ **/
typedef struct handle_s handle_t; /* Atomic operation type */ typedef struct handle_s handle_t; /* Atomic operation type */
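The credit accounting described above shows up directly in the handle life-cycle; a hedged sketch of a single-buffer update follows (the helper name is hypothetical, and <linux/err.h> is assumed for IS_ERR/PTR_ERR).

    /* Sketch only: reserve credits, modify one metadata buffer, return
     * unused credits at journal_stop() time. */
    static int example_update_one_buffer(journal_t *journal,
                                         struct buffer_head *bh)
    {
            handle_t *handle = journal_start(journal, 1);   /* 1 buffer credit */
            int err;

            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            err = journal_get_write_access(handle, bh);
            if (!err) {
                    /* ...modify bh->b_data under the handle... */
                    err = journal_dirty_metadata(handle, bh);
            }

            journal_stop(handle);   /* unused credits flow back to the transaction */
            return err;
    }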
...@@ -108,7 +108,7 @@ typedef struct handle_s handle_t; /* Atomic operation type */ ...@@ -108,7 +108,7 @@ typedef struct handle_s handle_t; /* Atomic operation type */
* typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
* *
* journal_t is linked to from the fs superblock structure. * journal_t is linked to from the fs superblock structure.
* *
* We use the journal_t to keep track of all outstanding transaction * We use the journal_t to keep track of all outstanding transaction
* activity on the filesystem, and to manage the state of the log * activity on the filesystem, and to manage the state of the log
* writing process. * writing process.
...@@ -128,7 +128,7 @@ typedef struct journal_s journal_t; /* Journal control structure */ ...@@ -128,7 +128,7 @@ typedef struct journal_s journal_t; /* Journal control structure */
* On-disk structures * On-disk structures
*/ */
/* /*
* Descriptor block types: * Descriptor block types:
*/ */
...@@ -149,8 +149,8 @@ typedef struct journal_header_s ...@@ -149,8 +149,8 @@ typedef struct journal_header_s
} journal_header_t; } journal_header_t;
/* /*
* The block tag: used to describe a single buffer in the journal * The block tag: used to describe a single buffer in the journal
*/ */
typedef struct journal_block_tag_s typedef struct journal_block_tag_s
{ {
...@@ -158,9 +158,9 @@ typedef struct journal_block_tag_s ...@@ -158,9 +158,9 @@ typedef struct journal_block_tag_s
__be32 t_flags; /* See below */ __be32 t_flags; /* See below */
} journal_block_tag_t; } journal_block_tag_t;
/* /*
* The revoke descriptor: used on disk to describe a series of blocks to * The revoke descriptor: used on disk to describe a series of blocks to
* be revoked from the log * be revoked from the log
*/ */
typedef struct journal_revoke_header_s typedef struct journal_revoke_header_s
{ {
...@@ -374,10 +374,10 @@ struct jbd_revoke_table_s; ...@@ -374,10 +374,10 @@ struct jbd_revoke_table_s;
**/ **/
/* Docbook can't yet cope with the bit fields, but we will leave the documentation /* Docbook can't yet cope with the bit fields, but we will leave the documentation
* in so it can be fixed later. * in so it can be fixed later.
*/ */
struct handle_s struct handle_s
{ {
/* Which compound transaction is this update a part of? */ /* Which compound transaction is this update a part of? */
transaction_t *h_transaction; transaction_t *h_transaction;
...@@ -435,7 +435,7 @@ struct handle_s ...@@ -435,7 +435,7 @@ struct handle_s
* *
*/ */
struct transaction_s struct transaction_s
{ {
/* Pointer to the journal for this transaction. [no locking] */ /* Pointer to the journal for this transaction. [no locking] */
journal_t *t_journal; journal_t *t_journal;
...@@ -455,7 +455,7 @@ struct transaction_s ...@@ -455,7 +455,7 @@ struct transaction_s
T_RUNDOWN, T_RUNDOWN,
T_FLUSH, T_FLUSH,
T_COMMIT, T_COMMIT,
T_FINISHED T_FINISHED
} t_state; } t_state;
/* /*
...@@ -569,7 +569,7 @@ struct transaction_s ...@@ -569,7 +569,7 @@ struct transaction_s
* journal_t. * journal_t.
* @j_flags: General journaling state flags * @j_flags: General journaling state flags
* @j_errno: Is there an outstanding uncleared error on the journal (from a * @j_errno: Is there an outstanding uncleared error on the journal (from a
* prior abort)? * prior abort)?
* @j_sb_buffer: First part of superblock buffer * @j_sb_buffer: First part of superblock buffer
* @j_superblock: Second part of superblock buffer * @j_superblock: Second part of superblock buffer
* @j_format_version: Version of the superblock format * @j_format_version: Version of the superblock format
...@@ -583,7 +583,7 @@ struct transaction_s ...@@ -583,7 +583,7 @@ struct transaction_s
* @j_wait_transaction_locked: Wait queue for waiting for a locked transaction * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
* to start committing, or for a barrier lock to be released * to start committing, or for a barrier lock to be released
* @j_wait_logspace: Wait queue for waiting for checkpointing to complete * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
* @j_wait_done_commit: Wait queue for waiting for commit to complete * @j_wait_done_commit: Wait queue for waiting for commit to complete
* @j_wait_checkpoint: Wait queue to trigger checkpointing * @j_wait_checkpoint: Wait queue to trigger checkpointing
* @j_wait_commit: Wait queue to trigger commit * @j_wait_commit: Wait queue to trigger commit
* @j_wait_updates: Wait queue to wait for updates to complete * @j_wait_updates: Wait queue to wait for updates to complete
...@@ -592,7 +592,7 @@ struct transaction_s ...@@ -592,7 +592,7 @@ struct transaction_s
* @j_tail: Journal tail - identifies the oldest still-used block in the * @j_tail: Journal tail - identifies the oldest still-used block in the
* journal. * journal.
* @j_free: Journal free - how many free blocks are there in the journal? * @j_free: Journal free - how many free blocks are there in the journal?
* @j_first: The block number of the first usable block * @j_first: The block number of the first usable block
* @j_last: The block number one beyond the last usable block * @j_last: The block number one beyond the last usable block
* @j_dev: Device where we store the journal * @j_dev: Device where we store the journal
* @j_blocksize: blocksize for the location where we store the journal. * @j_blocksize: blocksize for the location where we store the journal.
...@@ -604,12 +604,12 @@ struct transaction_s ...@@ -604,12 +604,12 @@ struct transaction_s
* @j_list_lock: Protects the buffer lists and internal buffer state. * @j_list_lock: Protects the buffer lists and internal buffer state.
* @j_inode: Optional inode where we store the journal. If present, all journal * @j_inode: Optional inode where we store the journal. If present, all journal
* block numbers are mapped into this inode via bmap(). * block numbers are mapped into this inode via bmap().
* @j_tail_sequence: Sequence number of the oldest transaction in the log * @j_tail_sequence: Sequence number of the oldest transaction in the log
* @j_transaction_sequence: Sequence number of the next transaction to grant * @j_transaction_sequence: Sequence number of the next transaction to grant
* @j_commit_sequence: Sequence number of the most recently committed * @j_commit_sequence: Sequence number of the most recently committed
* transaction * transaction
* @j_commit_request: Sequence number of the most recent transaction wanting * @j_commit_request: Sequence number of the most recent transaction wanting
* commit * commit
* @j_uuid: Uuid of client object. * @j_uuid: Uuid of client object.
* @j_task: Pointer to the current commit thread for this journal * @j_task: Pointer to the current commit thread for this journal
* @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
...@@ -823,8 +823,8 @@ struct journal_s ...@@ -823,8 +823,8 @@ struct journal_s
void *j_private; void *j_private;
}; };
/* /*
* Journal flag definitions * Journal flag definitions
*/ */
#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */ #define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */ #define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
...@@ -833,7 +833,7 @@ struct journal_s ...@@ -833,7 +833,7 @@ struct journal_s
#define JFS_LOADED 0x010 /* The journal superblock has been loaded */ #define JFS_LOADED 0x010 /* The journal superblock has been loaded */
#define JFS_BARRIER 0x020 /* Use IDE barriers */ #define JFS_BARRIER 0x020 /* Use IDE barriers */
/* /*
* Function declarations for the journaling transaction and buffer * Function declarations for the journaling transaction and buffer
* management * management
*/ */
...@@ -862,7 +862,7 @@ int __journal_remove_checkpoint(struct journal_head *); ...@@ -862,7 +862,7 @@ int __journal_remove_checkpoint(struct journal_head *);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *); void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */ /* Buffer IO */
extern int extern int
journal_write_metadata_buffer(transaction_t *transaction, journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in, struct journal_head *jh_in,
struct journal_head **jh_out, struct journal_head **jh_out,
...@@ -890,7 +890,7 @@ static inline handle_t *journal_current_handle(void) ...@@ -890,7 +890,7 @@ static inline handle_t *journal_current_handle(void)
/* The journaling code user interface: /* The journaling code user interface:
* *
* Create and destroy handles * Create and destroy handles
* Register buffer modifications against the current transaction. * Register buffer modifications against the current transaction.
*/ */
extern handle_t *journal_start(journal_t *, int nblocks); extern handle_t *journal_start(journal_t *, int nblocks);
...@@ -917,11 +917,11 @@ extern journal_t * journal_init_dev(struct block_device *bdev, ...@@ -917,11 +917,11 @@ extern journal_t * journal_init_dev(struct block_device *bdev,
int start, int len, int bsize); int start, int len, int bsize);
extern journal_t * journal_init_inode (struct inode *); extern journal_t * journal_init_inode (struct inode *);
extern int journal_update_format (journal_t *); extern int journal_update_format (journal_t *);
extern int journal_check_used_features extern int journal_check_used_features
(journal_t *, unsigned long, unsigned long, unsigned long); (journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_check_available_features extern int journal_check_available_features
(journal_t *, unsigned long, unsigned long, unsigned long); (journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_set_features extern int journal_set_features
(journal_t *, unsigned long, unsigned long, unsigned long); (journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_create (journal_t *); extern int journal_create (journal_t *);
extern int journal_load (journal_t *journal); extern int journal_load (journal_t *journal);
...@@ -1015,7 +1015,7 @@ do { \ ...@@ -1015,7 +1015,7 @@ do { \
* bit, when set, indicates that we have had a fatal error somewhere, * bit, when set, indicates that we have had a fatal error somewhere,
* either inside the journaling layer or indicated to us by the client * either inside the journaling layer or indicated to us by the client
 * (e.g. ext3), and that we should not commit any further * (e.g. ext3), and that we should not commit any further
* transactions. * transactions.
*/ */
static inline int is_journal_aborted(journal_t *journal) static inline int is_journal_aborted(journal_t *journal)
...@@ -1082,7 +1082,7 @@ static inline int jbd_space_needed(journal_t *journal) ...@@ -1082,7 +1082,7 @@ static inline int jbd_space_needed(journal_t *journal)
#define BJ_Reserved 7 /* Buffer is reserved for access by journal */ #define BJ_Reserved 7 /* Buffer is reserved for access by journal */
#define BJ_Locked 8 /* Locked for I/O during commit */ #define BJ_Locked 8 /* Locked for I/O during commit */
#define BJ_Types 9 #define BJ_Types 9
extern int jbd_blocks_per_page(struct inode *inode); extern int jbd_blocks_per_page(struct inode *inode);
#ifdef __KERNEL__ #ifdef __KERNEL__
......