Commit 0222e657 authored by Jeff Mahoney, committed by Linus Torvalds

reiserfs: strip trailing whitespace

This patch strips trailing whitespace from the reiserfs code.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3cd6dbe6
[LICENSING]
ReiserFS is hereby licensed under the GNU General
Public License version 2.
@@ -31,7 +31,7 @@ the GPL as not allowing those additional licensing options, you read
it wrongly, and Richard Stallman agrees with me, when carefully read
you can see that those restrictions on additional terms do not apply
to the owner of the copyright, and my interpretation of this shall
govern for this license.
Finally, nothing in this license shall be interpreted to allow you to
fail to fairly credit me, or to remove my credits, without my
...
@@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
/* summary:
   if deleting something ( tb->insert_size[0] < 0 )
   return(balance_leaf_when_delete()); (flag d handled here)
   else
   if lnum is larger than 0 we put items into the left node
   if rnum is larger than 0 we put items into the right node
   if snum1 is larger than 0 we put items into the new node s1
   if snum2 is larger than 0 we put items into the new node s2

   Note that all *num* count new items being created.

   It would be easier to read balance_leaf() if each of these summary
   lines was a separate procedure rather than being inlined. I think
   that there are many passages here and in balance_leaf_when_delete() in
   which two calls to one procedure can replace two passages, and it
   might save cache space and improve software maintenance costs to do so.

   Vladimir made the perceptive comment that we should offload most of
   the decision making in this function into fix_nodes/check_balance, and
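
A hedged, self-contained sketch of the dispatch shape that summary describes (the stand-in struct and all names here are illustrative, not the kernel's):

struct tb_sketch { int insert_size0, lnum, rnum, snum1, snum2; };

static int balance_leaf_sketch(struct tb_sketch *tb)
{
	if (tb->insert_size0 < 0)
		return -1;	/* stands in for balance_leaf_when_delete() */
	if (tb->lnum > 0)  { /* put items into the left neighbor  */ }
	if (tb->rnum > 0)  { /* put items into the right neighbor */ }
	if (tb->snum1 > 0) { /* put items into new node S_new[0]  */ }
	if (tb->snum2 > 0) { /* put items into new node S_new[1]  */ }
	return 0;
}
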
@@ -288,15 +288,15 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
    )
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);	/* index into the array of item headers in S[0]
							   of the affected item */
	struct buffer_info bi;
	struct buffer_head *S_new[2];	/* new nodes allocated to hold what could not fit into S */
	int snum[2];	/* number of items that will be placed
			   into S_new (includes partially shifted
			   items) */
	int sbytes[2];	/* if an item is partially shifted into S_new then
			   if it is a directory item
			   it is the number of entries from the item that are shifted into S_new
			   else
			   it is the number of bytes from the item that are shifted into S_new
@@ -1983,7 +1983,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
	/* store_print_tb (tb); */
	/* do not delete, just comment it out */
	/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
	   "check");*/
	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
...
@@ -20,14 +20,14 @@
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed. If
** this is the last open file descriptor, and the file is
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
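
A hedged sketch of that release-time decision (the two predicate helpers are hypothetical; reiserfs_truncate_file and its signature appear later in this diff):

static int file_release_sketch(struct inode *inode, struct file *filp)
{
	/* only the last close of a tail-sized file is interesting */
	if (!is_last_open_descriptor(filp))	/* hypothetical helper */
		return 0;
	if (!tail_in_unformatted_node(inode))	/* hypothetical helper */
		return 0;
	/* repack the tail into a direct item; 1 = update timestamps */
	return reiserfs_truncate_file(inode, 1);
}
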
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
@@ -223,7 +223,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
}
/* Write @count bytes at position @ppos in a file indicated by @file
   from the buffer @buf.
   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
   something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was
...
@@ -30,8 +30,8 @@
** get_direct_parent
** get_neighbors
** fix_nodes
**
**
**/
#include <linux/time.h>
@@ -377,9 +377,9 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
	int needed_nodes;
	int start_item,	/* position of item we start filling node from */
	 end_item,	/* position of item we finish filling node by */
	 start_bytes,	/* number of first bytes (entries for directory) of start_item-th item
			   we do not include into node that is being filled */
	 end_bytes;	/* number of last bytes (entries for directory) of end_item-th item
			   we do not include into node that is being filled */
	int split_item_positions[2];	/* these are positions in virtual item of
					   items, that are split between S[0] and
@@ -569,7 +569,7 @@ extern struct tree_balance *cur_tb;
/* Set parameters for balancing.
 * Performs write of results of analysis of balancing into structure tb,
 * where it will later be used by the functions that actually do the balancing.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node;
@@ -1204,7 +1204,7 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
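
This return convention recurs throughout fix_node.c. A hedged sketch of how a caller might act on it (the mapping only, not kernel code):

static int act_on_check_balance(int ret)
{
	switch (ret) {
	case 1:		/* schedule occurred: repeat the tree search */
		return 1;
	case 0:		/* also balance the next level up */
		return 0;
	case -1:	/* higher levels are already fine: done */
		return 0;
	default:	/* -2: no disk space, give up */
		return -1;
	}
}
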
@@ -1239,7 +1239,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
	/* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
	   where 4th parameter is s1bytes and 5th - s2bytes
	 */
	short snum012[40] = { 0, };	/* s0num, s1num, s2num for 8 cases
					   0,1 - do not shift and do not shift but bottle
					   2 - shift only whole item to left
					   3 - shift to left and bottle as much as possible
@@ -1288,7 +1288,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
	create_virtual_node(tb, h);
	/*
	   determine maximal number of items we can shift to the left neighbor (in tb structure)
	   and the maximal number of bytes that can flow to the left neighbor
	   from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
@@ -1349,13 +1349,13 @@ static int ip_check_balance(struct tree_balance *tb, int h)
	{
		int lpar, rpar, nset, lset, rset, lrset;
		/*
		 * regular overflowing of the node
		 */
		/* get_num_ver works in 2 modes (FLOW & NO_FLOW)
		   lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
		   nset, lset, rset, lrset - show whether flowing items give better packing
		 */
#define FLOW 1
#define NO_FLOW	0		/* do not do any splitting */
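
A self-contained toy model of the FLOW / NO_FLOW distinction (not kernel code): count how many fixed-capacity nodes a run of item sizes needs when an item may be split across nodes versus when it may not. It assumes each item fits in an empty node.

static int nodes_needed(const int *size, int n, int cap, int flow)
{
	int nodes = 1, used = 0, i;

	for (i = 0; i < n; i++) {
		int left = size[i];

		while (used + left > cap) {
			if (flow)	/* fill this node, spill the rest */
				left -= cap - used;
			/* under NO_FLOW the whole item moves to a fresh node */
			used = 0;
			nodes++;
		}
		used += left;
	}
	return nodes;
}
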
@@ -1545,7 +1545,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
@@ -1728,7 +1728,7 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
@@ -1822,7 +1822,7 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	d - delete, c - cut.
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
@@ -1851,7 +1851,7 @@ static int dc_check_balance(struct tree_balance *tb, int h)
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste, d - delete, c - cut.
 * Returns:	1 - schedule occurred;
 *	        0 - balancing for higher levels needed;
 *	       -1 - no balancing for higher levels needed;
 *	       -2 - no disk space.
@@ -2296,15 +2296,15 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
 *	analyze what and where should be moved;
 *	get sufficient number of new nodes;
 * Balancing will start only after all resources will be collected at a time.
 *
 * When ported to SMP kernels, only at the last moment after all needed nodes
 * are collected in cache, will the resources be locked using the usual
 * textbook ordered lock acquisition algorithms. Note that ensuring that
 * this code neither write locks what it does not need to write lock nor locks out of order
 * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
 *
 * fix is meant in the sense of render unchanging
 *
 * Latency might be improved by first gathering a list of what buffers are needed
 * and then getting as many of them in parallel as possible? -Hans
 *
@@ -2316,7 +2316,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
 *	ins_ih & ins_sd are used when inserting
 * Returns:	1 - schedule occurred while the function worked;
 *	        0 - schedule didn't occur while the function worked;
 *	       -1 - if no_disk_space
 */
int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted
...
@@ -7,7 +7,7 @@
 * (see Applied Cryptography, 2nd edition, p448).
 *
 * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
 *
 * Jeremy has agreed to the contents of reiserfs/README. -Hans
 * Yura's function is added (04/07/2000)
 */
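
For orientation, the simplest of the directory hashes in this file, r5, has roughly this shape (a from-memory sketch, hedged; see fs/reiserfs/hashes.c for the real loop and termination condition):

static unsigned int r5_hash_sketch(const signed char *msg, int len)
{
	unsigned int a = 0;

	while (len--) {
		a += *msg << 4;		/* mix high and low nibbles */
		a += *msg >> 4;
		a *= 11;
		msg++;
	}
	return a;
}
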
...
@@ -278,7 +278,7 @@ static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n)
/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
 * last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
 * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
 */
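
Why cpy_num pointers travel with only cpy_num - 1 keys: an internal node with n child pointers holds n - 1 separating keys, so the key dividing the moved run from the receiving node comes down from the parent. A self-contained toy model of the FIRST_TO_LAST direction (not kernel code):

struct node_sketch { int nptr; long ptr[32]; long key[31]; };

static void copy_first_to_last(struct node_sketch *dest,
			       struct node_sketch *src,
			       int cpy_num, long d_key /* from parent */)
{
	int i;

	dest->key[dest->nptr - 1] = d_key;	/* splits old tail from new run */
	for (i = 0; i < cpy_num; i++) {
		dest->ptr[dest->nptr + i] = src->ptr[i];
		if (i < cpy_num - 1)
			dest->key[dest->nptr + i] = src->key[i];
	}
	dest->nptr += cpy_num;
	/* src then loses cpy_num pointers and keys; deletion omitted */
}
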
static void internal_copy_pointers_items(struct buffer_info *dest_bi,
					 struct buffer_head *src,
@@ -385,7 +385,7 @@ static void internal_move_pointers_items(struct buffer_info *dest_bi,
	if (last_first == FIRST_TO_LAST) {	/* shift_left occurs */
		first_pointer = 0;
		first_item = 0;
		/* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
		   for key - with first_item */
		internal_delete_pointers_items(src_bi, first_pointer,
					       first_item, cpy_num - del_par);
@@ -453,7 +453,7 @@ static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_b
	}
}
/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
 * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfl.
 * Delete pointer_amount items and node pointers from buffer src.
@@ -518,7 +518,7 @@ static void internal_shift1_left(struct tree_balance *tb,
	/* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
}
/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
 * Copy n node pointers and n - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfr.
 * Delete n items and node pointers from buffer src.
@@ -749,7 +749,7 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
	   this means that new pointers and items must be inserted AFTER
	   child_pos
	   }
	   else
	   {
	   it is the position of the leftmost pointer that must be deleted (together with
	   its corresponding key to the left of the pointer)
...
@@ -52,7 +52,7 @@ void reiserfs_delete_inode(struct inode *inode)
		/* Do quota update inside a transaction for journaled quotas. We must do that
		 * after delete_object so that quota updates go into the same transaction as
		 * stat data deletion */
		if (!err)
			DQUOT_FREE_INODE(inode);
		if (journal_end(&th, inode->i_sb, jbegin_count))
@@ -363,7 +363,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
	}
	/* make sure we don't read more bytes than actually exist in
	 ** the file. This can happen in odd cases where i_size isn't
	 ** correct, and when direct item padding results in a few
	 ** extra bytes at the end of the direct item
	 */
	if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
@@ -438,15 +438,15 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
** -ENOENT instead of a valid buffer. block_prepare_write expects to
** be able to do i/o on the buffers returned, unless an error value
** is also returned.
**
** So, this allows block_prepare_write to be used for reading a single block
** in a page. Where it does not produce a valid page for holes, or past the
** end of the file. This turns out to be exactly what we need for reading
** tails for conversion.
**
** The point of the wrapper is forcing a certain value for create, even
** though the VFS layer is calling this function with create==1. If you
** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
** don't use this function.
*/
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
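
The diff elides the body, but per the comment it plausibly reduces to forcing the create argument (a hedged sketch, not the verbatim source):

static int get_block_create_0_sketch(struct inode *inode, sector_t block,
				     struct buffer_head *bh_result,
				     int create /* deliberately ignored */)
{
	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}
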
@@ -602,7 +602,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
	int done;
	int fs_gen;
	struct reiserfs_transaction_handle *th = NULL;
	/* space reserved in transaction batch:
	   . 3 balancings in direct->indirect conversion
	   . 1 block involved into reiserfs_update_sd()
	   XXX in practically impossible worst case direct2indirect()
@@ -754,7 +754,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
		reiserfs_write_unlock(inode->i_sb);
		/* the item was found, so new blocks were not added to the file
		 ** there is no need to make sure the inode is updated with this
		 ** transaction
		 */
		return retval;
@@ -986,7 +986,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
		/* this loop could log more blocks than we had originally asked
		 ** for. So, we have to allow the transaction to end if it is
		 ** too big or too full. Update the inode so things are
		 ** consistent if we crash before the function returns
		 **
		 ** release the path so that anybody waiting on the path before
@@ -997,7 +997,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
			if (retval)
				goto failure;
		}
		/* inserting indirect pointers for a hole can take a
		 ** long time. reschedule if needed
		 */
		cond_resched();
@@ -1444,7 +1444,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
	   update sd on unlink all that is required is to check for nlink
	   here. This bug was first found by Sizif when debugging
	   SquidNG/Butterfly, forgotten, and found again after Philippe
	   Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
	   More logical fix would require changes in fs/inode.c:iput() to
	   remove inode from hash-table _after_ fs cleaned disk stuff up and
@@ -1619,7 +1619,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync)
	if (inode->i_sb->s_flags & MS_RDONLY)
		return -EROFS;
	/* memory pressure can sometimes initiate write_inode calls with sync == 1,
	 ** these cases are just when the system needs ram, not when the
	 ** inode needs to reach disk for safety, and they can safely be
	 ** ignored because the altered inode has already been logged.
	 */
@@ -1736,7 +1736,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
/* inserts the stat data into the tree, and then calls
   reiserfs_new_directory (to insert ".", ".." item if new object is
   directory) or reiserfs_new_symlink (to insert symlink body if new
   object is symlink) or nothing (if new object is regular file)
   NOTE! uid and gid must already be set in the inode. If we return
   non-zero due to an error, we have to drop the quota previously allocated
@@ -1744,7 +1744,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
   if we return non-zero, we also end the transaction. */
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
		       struct inode *dir, int mode, const char *symname,
		       /* 0 for regular, EMPTY_DIR_SIZE for dirs,
		          strlen (symname) for symlinks) */
		       loff_t i_size, struct dentry *dentry,
		       struct inode *inode,
@@ -1794,7 +1794,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
		goto out_bad_inode;
	}
	if (old_format_only(sb))
		/* not a perfect generation count, as object ids can be reused, but
		 ** this is as good as reiserfs can do right now.
		 ** note that the private part of inode isn't filled in yet, we have
		 ** to use the directory.
@@ -2081,7 +2081,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
	if (p_s_inode->i_size > 0) {
		if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
			// -ENOENT means we truncated past the end of the file,
			// and get_block_create_0 could not find a block to read in,
			// which is ok.
			if (error != -ENOENT)
@@ -2093,11 +2093,11 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
		}
	}
	/* so, if page != NULL, we have a buffer head for the offset at
	 ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
	 ** then we have an unformatted node. Otherwise, we have a direct item,
	 ** and no zeroing is required on disk. We zero after the truncate,
	 ** because the truncate might pack the item anyway
	 ** (it will unmap bh if it packs).
	 */
	/* it is enough to reserve space in transaction for 2 balancings:
@@ -2306,8 +2306,8 @@ static int map_block_for_writepage(struct inode *inode,
	return retval;
}
/*
 * mason@suse.com: updated in 2.5.54 to follow the same general io
 * start/recovery path as __block_write_full_page, along with special
 * code to handle reiserfs tails.
 */
@@ -2447,7 +2447,7 @@ static int reiserfs_write_full_page(struct page *page,
	unlock_page(page);
	/*
	 * since any buffer might be the only dirty buffer on the page,
	 * the first submit_bh can bring the page out of writeback.
	 * be careful with the buffers.
	 */
@@ -2466,8 +2466,8 @@ static int reiserfs_write_full_page(struct page *page,
	if (nr == 0) {
		/*
		 * if this page only had a direct item, it is very possible for
		 * no io to be required without there being an error. Or,
		 * someone else could have locked them and sent them down the
		 * pipe without locking the page
		 */
		bh = head;
@@ -2486,7 +2486,7 @@ static int reiserfs_write_full_page(struct page *page,
      fail:
	/* catches various errors, we need to make sure any valid dirty blocks
	 * get to the media. The page is currently locked and not marked for
	 * writeback
	 */
	ClearPageUptodate(page);
...
@@ -189,7 +189,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
	}
	/* we unpack by finding the page with the tail, and calling
	 ** reiserfs_prepare_write on that page. This will force a
	 ** reiserfs_get_block to unpack the tail for us.
	 */
	index = inode->i_size >> PAGE_CACHE_SHIFT;
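
A hedged sketch of the steps that plausibly follow (the diff elides them; error handling is trimmed, and the prepare/commit pair is the 2.6-era page cache convention this comment already names):

	page = grab_cache_page(inode->i_mapping, index);
	if (!page)
		goto out;	/* assumed error path */
	/* a zero-length prepare_write at the tail offset forces
	   reiserfs_get_block, which unpacks the tail */
	write_from = inode->i_size & (PAGE_CACHE_SIZE - 1);
	retval = reiserfs_prepare_write(filp, page, write_from, write_from);
	if (!retval)
		retval = reiserfs_commit_write(filp, page, write_from, write_from);
	unlock_page(page);
	page_cache_release(page);
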
...
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex. I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join -- same as journal_begin, but won't block on the current
**                  transaction regardless of age. Don't ever call
**                  this. Ever. There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction. clears any flags
**                  that might make them get sent to disk
**                  and then marks them BH_JDirty. Puts the buffer head
**                  into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                  otherwise, it could do an async/synchronous commit, or
**                  a full flush of all log and real blocks in the
**                  transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                  commit blocks are sent to disk. Forces commit blocks
**                  to disk for all backgrounded commits that have been
**                  around too long.
**                  -- Note, if you call this as an immediate flush
**                  from within kupdate, it will ignore the immediate flag
*/
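
A hedged sketch of the lifecycle those entry points imply (error paths trimmed; jbegin_count stands for whatever block estimate the caller made, and the calls shown match signatures that appear elsewhere in this diff):

	struct reiserfs_transaction_handle th;

	if (journal_begin(&th, sb, jbegin_count))	/* may join or block */
		return -EIO;	/* assumed error convention */
	reiserfs_prepare_for_journal(sb, bh, 1);	/* clean bh, wait on it */
	/* ... modify the buffer ... */
	journal_mark_dirty(&th, sb, bh);		/* BH_JDirty + txn hash */
	if (journal_end(&th, sb, jbegin_count))		/* batch, commit, or flush */
		return -EIO;
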
@@ -212,7 +212,7 @@ static void allocate_bitmap_nodes(struct super_block *p_s_sb)
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
-			break;	// this is ok, we'll try again when more are needed
+			break;	/* this is ok, we'll try again when more are needed */
		}
	}
}
@@ -283,7 +283,7 @@ static int free_bitmap_nodes(struct super_block *p_s_sb)
}
/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
@@ -315,7 +315,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
}
/*
** find an available list bitmap. If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
@@ -348,7 +348,7 @@ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
	return jb;
}
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
@@ -376,7 +376,7 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
}
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
@@ -403,7 +403,7 @@ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
}
/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
		       struct reiserfs_journal_cnode *cn)
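
A self-contained model (not the kernel code) of the doubly linked free list these last few helpers manage through the cnode next/prev pointers:

struct cnode_sketch { struct cnode_sketch *prev, *next; };

static struct cnode_sketch *free_head;

static struct cnode_sketch *get_cnode_sketch(void)
{
	struct cnode_sketch *cn = free_head;

	if (!cn)
		return NULL;		/* pool exhausted */
	free_head = cn->next;
	if (free_head)
		free_head->prev = NULL;
	cn->next = cn->prev = NULL;
	return cn;
}

static void free_cnode_sketch(struct cnode_sketch *cn)
{
	cn->prev = NULL;
	cn->next = free_head;
	if (free_head)
		free_head->prev = cn;
	free_head = cn;
}
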
@@ -1192,8 +1192,8 @@ static int flush_commit_list(struct super_block *s,
}
/*
** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
@@ -1335,8 +1335,8 @@ static int update_journal_header_block(struct super_block *p_s_sb,
	return _update_journal_header_block(p_s_sb, offset, trans_id);
}
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
@@ -1382,8 +1382,8 @@ static void del_from_work_list(struct super_block *s,
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT. This can only be called while there are no journal writers,
** and the journal is locked. That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
@@ -1429,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
		goto flush_older_and_return;
	}
	/* start by putting the commit list on disk. This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);
@@ -1444,8 +1444,8 @@ static int flush_journal_list(struct super_block *s,
		goto flush_older_and_return;
	}
	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s, "journal-844", "journal list is flushing, "
@@ -1473,8 +1473,8 @@ static int flush_journal_list(struct super_block *s,
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;
			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);
@@ -1497,8 +1497,8 @@ static int flush_journal_list(struct super_block *s,
			goto free_cnode;
		}
		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct super_block *s,
			       __func__);
      flush_older_and_return:
	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk. This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
@@ -1596,7 +1596,7 @@ static int flush_journal_list(struct super_block *s,
	}
	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
@@ -2016,9 +2016,9 @@ static int journal_compare_desc_commit(struct super_block *p_s_sb,
	return 0;
}
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
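
A hedged sketch of how a replay scan might consume that convention (the trailing argument names are hypothetical; the diff elides the rest of the signature):

	ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid,
					   &newest_mount_id);
	if (ret == 1) {
		/* desc and commit both valid: replay this transaction */
	} else if (ret == 0) {
		/* not a description block: skip ahead and keep scanning */
	} else {
		/* -1: corrupt commit block, stop trusting the log here */
	}
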
static int journal_transaction_is_valid(struct super_block *p_s_sb,
					struct buffer_head *d_bh,
@@ -2380,8 +2380,8 @@ static int journal_read(struct super_block *p_s_sb)
		 bdevname(journal->j_dev_bd, b));
	start = get_seconds();
	/* step 1, read in the journal header block. Check the transaction it says
	 ** is the first unflushed, and if that transaction is not valid,
	 ** replay is done
	 */
	journal->j_header_bh = journal_bread(p_s_sb,
@@ -2406,8 +2406,8 @@ static int journal_read(struct super_block *p_s_sb)
		    le32_to_cpu(jh->j_last_flush_trans_id));
		valid_journal_header = 1;
		/* now, we try to read the first unflushed offset. If it is not valid,
		 ** there is nothing more we can do, and it makes no sense to read
		 ** through the whole log.
		 */
		d_bh =
@@ -2919,7 +2919,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
	return 0;
}
/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
@@ -3040,7 +3040,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
	now = get_seconds();
	/* if there is no room in the journal OR
	 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
	 ** we don't sleep if there aren't other writers
	 */
@@ -3240,7 +3240,7 @@ int journal_begin(struct reiserfs_transaction_handle *th,
**
** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
** transaction is committed.
**
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
@@ -3290,7 +3290,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
			       atomic_read(&(journal->j_wcount)));
		return 1;
	}
	/* this error means I've screwed up, and we've overflowed the transaction.
	 ** Nothing can be done here, except make the FS readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
@@ -3380,7 +3380,7 @@ int journal_end(struct reiserfs_transaction_handle *th,
		}
	}
	/* removes from the current transaction, releasing and decrementing any counters.
	 ** also files the removed buffer directly onto the clean list
	 **
	 ** called by journal_mark_freed when a block has been deleted
@@ -3478,7 +3478,7 @@ static int can_dirty(struct reiserfs_journal_cnode *cn)
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *p_s_sb, unsigned long nblocks)
@@ -3560,13 +3560,13 @@ int reiserfs_flush_old_commits(struct super_block *p_s_sb)
/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
@@ -3594,7 +3594,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
		atomic_dec(&(journal->j_wcount));
	}
	/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
	 ** will be dealt with by next transaction that actually writes something, but should be taken
	 ** care of in this trans
	 */
@@ -3603,7 +3603,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
	/* if wcount > 0, and we are called with flush or commit_now,
	 ** we wait on j_join_wait. We will wake up when the last writer has
	 ** finished the transaction, and started it on its way to the disk.
	 ** Then, we flush the commit or journal list, and just return 0
	 ** because the rest of journal end was already done for this transaction.
	 */
	if (atomic_read(&(journal->j_wcount)) > 0) {
...@@ -3674,7 +3674,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, ...@@ -3674,7 +3674,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
/* /*
** Does all the work that makes deleting blocks safe. ** Does all the work that makes deleting blocks safe.
** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on. ** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
** **
** otherwise: ** otherwise:
** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
** before this transaction has finished. ** before this transaction has finished.
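
The "set a bit in the journal bitmap" step can be pictured with a self-contained bitmap sketch (the sizes and names below are assumptions, not the real reiserfs_list_bitmap layout):

#include <stdint.h>
#include <stdio.h>

/* Toy journal bitmap: one bit per block.  Setting a block's bit marks it
 * as off-limits to the allocator until the transaction commits. */
#define TOY_BLOCKS 1024
static uint32_t toy_bitmap[TOY_BLOCKS / 32];

static void toy_mark_freed(unsigned long block)
{
	toy_bitmap[block / 32] |= 1u << (block % 32);
}

static int toy_block_reserved(unsigned long block)
{
	return (toy_bitmap[block / 32] >> (block % 32)) & 1;
}

int main(void)
{
	toy_mark_freed(42);                       /* journal_mark_freed analogue */
	printf("%d %d\n", toy_block_reserved(42), /* 1: must not be reallocated  */
	       toy_block_reserved(43));           /* 0: free for use             */
	return 0;
}
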
@@ -3878,7 +3878,7 @@ extern struct tree_balance *cur_tb;
** be written to disk while we are altering it. So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
struct buffer_head *bh, int wait)
@@ -3920,7 +3920,7 @@ static void flush_old_journal_lists(struct super_block *s)
}
}
/*
** long and ugly. If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
@@ -3981,7 +3981,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
wait_on_commit = 1;
}
/* check_journal_end locks the journal, and unlocks it if it does not return 1;
** it tells us if we should continue with the journal_end, or just return
*/
if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
@@ -4078,7 +4078,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
last_cn->next = jl_cn;
}
last_cn = jl_cn;
/* make sure the block we are trying to log is not a block
of the journal or reserved area */
if (is_block_in_log_or_reserved_area
@@ -4225,9 +4225,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
} else if (!(jl->j_state & LIST_COMMIT_PENDING))
queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
/* if the next transaction has any chance of wrapping, flush
** transactions that might get overwritten. If any journal lists are very
** old, flush them as well.
*/
first_jl:
list_for_each_safe(entry, safe, &journal->j_journal_list) {
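
A minimal sketch of that wrap check, under the assumption that the journal behaves as a fixed-size ring where a full transaction's worth of space must be free ahead of the write head (all names here are invented for illustration):

#include <stdio.h>

/* Toy ring-journal wrap check: if the space between the write head and
 * the oldest un-flushed transaction can't hold the next transaction,
 * the oldest transaction must be flushed before the log wraps onto it. */
struct toy_log {
	unsigned long size;       /* total journal blocks             */
	unsigned long head;       /* next block to be written         */
	unsigned long oldest;     /* start of oldest live transaction */
};

static int toy_must_flush(const struct toy_log *log, unsigned long next_trans)
{
	unsigned long free_ahead = (log->oldest + log->size - log->head - 1) % log->size;
	return next_trans > free_ahead;
}

int main(void)
{
	struct toy_log log = { .size = 8192, .head = 8000, .oldest = 600 };
	printf("%d\n", toy_must_flush(&log, 1024)); /* 1: would overwrite 'oldest' */
	log.oldest = 2000;
	printf("%d\n", toy_must_flush(&log, 1024)); /* 0: enough room before wrap  */
	return 0;
}
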
...
@@ -119,8 +119,8 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
DEH_SIZE * copy_count + copy_records_len);
}
/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item, or
part of it, or nothing (see the return 0 below) from SOURCE to the end
(if last_first) or beginning (!last_first) of the DEST */
/* returns 1 if anything was copied, else 0 */
static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
@@ -396,7 +396,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
else {
struct item_head n_ih;
/* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
part defined by 'cpy_bytes'; create a new item header; change the old item header (????);
n_ih = new item_header;
*/
@@ -426,7 +426,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
else {
struct item_head n_ih;
/* copy part of the body of the item number 'item_num' of SOURCE to the beginning of the DEST
part defined by 'cpy_bytes'; create a new item header;
n_ih = new item_header;
*/
@@ -724,7 +724,7 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
static void leaf_delete_items_entirely(struct buffer_info *bi,
int first, int del_num);
/* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
If not:
If last_first == 0, starting from position 'first' delete del_num-1 items in whole. Delete part of the body of
the first item. The part is defined by del_bytes. Don't delete the first item header.
If last_first == 1, starting from position 'first+1' delete del_num-1 items in whole. Delete part of the body of
@@ -783,7 +783,7 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
/* len = body len of item */
len = ih_item_len(ih);
/* delete the part of the last item of the bh;
do not delete the item header
*/
leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
@@ -865,7 +865,7 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before,
}
}
/* paste paste_size bytes into the affected_item_num-th item.
When the item is a directory item, this only prepares space for new entries */
void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
int pos_in_item, int paste_size,
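
The "only prepares space" behavior amounts to opening a gap inside a packed buffer. A stand-alone sketch of that step (the buffer layout and names are illustrative assumptions):

#include <stdio.h>
#include <string.h>

/* Toy version of the gap-opening part of a paste: shift the tail of the
 * buffer right by paste_size bytes so new bytes (or new directory
 * entries) can later be written into the hole at 'pos'. */
static void toy_open_gap(char *buf, int used, int pos, int paste_size)
{
	memmove(buf + pos + paste_size, buf + pos, used - pos);
	memset(buf + pos, 0, paste_size);   /* space is prepared, not filled */
}

int main(void)
{
	char buf[16] = "abcdef";
	toy_open_gap(buf, 6, 3, 2);
	/* prints "abc..def": a 2-byte hole now sits between the halves */
	printf("%.3s..%.3s\n", buf, buf + 5);
	return 0;
}
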
@@ -1022,7 +1022,7 @@ static int leaf_cut_entries(struct buffer_head *bh,
/* when the cut item is part of a regular file:
pos_in_item - first byte that must be cut
cut_size - number of bytes to be cut beginning from pos_in_item
when the cut item is part of a directory:
pos_in_item - number of the first deleted entry
cut_size - count of deleted entries
@@ -1275,7 +1275,7 @@ void leaf_paste_entries(struct buffer_info *bi,
/* change the item key if necessary (when we paste before the 0-th entry) */
if (!before) {
set_le_ih_k_offset(ih, deh_offset(new_dehs));
/* memcpy (&ih->ih_key.k_offset,
&new_dehs->deh_offset, SHORT_KEY_SIZE);*/
}
#ifdef CONFIG_REISERFS_CHECK
...
@@ -106,7 +106,7 @@ key of the first directory entry in it.
This function first calls search_by_key; then, if an item whose first
entry matches is not found, it looks for the entry inside the directory
item found by search_by_key. Fills in the path to the entry, and the
entry position in the item.
*/
@@ -371,7 +371,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
return d_splice_alias(inode, dentry);
}
/*
** looks up the dentry of the parent directory for child.
** taken from ext2_get_parent
*/
@@ -401,7 +401,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child)
return d_obtain_alias(inode);
}
/* add an entry to the directory (the entry can be hidden).
insert definition of when hidden directories are used here -Hans
@@ -559,7 +559,7 @@ static int drop_new_inode(struct inode *inode)
return 0;
}
/* utility function that does setup for reiserfs_new_inode.
** DQUOT_INIT needs lots of credits, so it's better to have it
** outside of a transaction, so we had to pull some bits of
** reiserfs_new_inode out into this func.
@@ -820,7 +820,7 @@ static inline int reiserfs_empty_dir(struct inode *inode)
{
/* we can cheat because an old format dir cannot have
** EMPTY_DIR_SIZE, and a new format dir cannot have
** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
** regardless of disk format version, the directory is empty.
*/
if (inode->i_size != EMPTY_DIR_SIZE &&
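
That shortcut reduces to a pure size comparison. A stand-alone model follows; the two size constants here are placeholders for illustration, not the real EMPTY_DIR_SIZE values:

#include <stdio.h>

/* Toy version of the reiserfs_empty_dir() cheat: an empty directory
 * contains only "." and "..", so its i_size can take exactly one value
 * per disk format, and the two formats' values never collide.  The
 * numbers below are placeholders, not the real on-disk constants. */
#define TOY_EMPTY_DIR_SIZE     48   /* assumed: new-format empty dir */
#define TOY_EMPTY_DIR_SIZE_V1  32   /* assumed: old-format empty dir */

static int toy_empty_dir(long i_size)
{
	return i_size == TOY_EMPTY_DIR_SIZE || i_size == TOY_EMPTY_DIR_SIZE_V1;
}

int main(void)
{
	printf("%d %d\n", toy_empty_dir(48), toy_empty_dir(1024)); /* 1 0 */
	return 0;
}
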
@@ -1162,7 +1162,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
return retval;
}
-// de contains information pointing to an entry which
+/* de contains information pointing to an entry which */
static int de_still_valid(const char *name, int len,
struct reiserfs_dir_entry *de)
{
@@ -1206,10 +1206,10 @@ static void set_ino_in_dir_entry(struct reiserfs_dir_entry *de,
de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid;
}
/*
* a process that is going to call fix_nodes/do_balance must hold only
* one path. If it holds 2 or more, it can get into endless waiting in
* get_empty_nodes or its clones
*/
static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
@@ -1263,7 +1263,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_inode_mode = old_inode->i_mode;
if (S_ISDIR(old_inode_mode)) {
// make sure that the directory being renamed has a correct ".."
// and that its new parent directory does not have too many links
// already
@@ -1274,8 +1274,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
/* the directory is being renamed, its parent directory will be changed,
** so find the ".." entry
*/
dot_dot_de.de_gen_number_bit_string = NULL;
retval =
@@ -1385,9 +1385,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
this stuff, yes? Then, having
gathered everything into RAM we
should lock the buffers, yes? -Hans */
/* probably. our rename needs to hold more
** than one path at once. The seals would
** have to be written to deal with multi-path
** issues -chris
*/
/* sanity checking before doing the rename - avoid races many
@@ -1465,7 +1465,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
if (S_ISDIR(old_inode_mode)) {
-// adjust ".." of renamed directory
+/* adjust ".." of renamed directory */
set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);
...
@@ -180,7 +180,7 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
if (cur_size > new_size) {
/* mark as used everything that was listed as free at the end of the objectid
** map
*/
objectid_map[new_size - 1] = objectid_map[cur_size - 1];
set_sb_oid_cursize(disk_sb, new_size);
...
@@ -178,11 +178,11 @@ static char *is_there_reiserfs_struct(char *fmt, int *what)
the appropriate printk. With this reiserfs_warning you can use format
specifications for complex structures like you used to do with
printfs for integers, doubles and pointers. For instance, to print
out a key structure you have to write just:
reiserfs_warning ("bad key %k", key);
instead of
printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
key->k_offset, key->k_uniqueness);
*/
static DEFINE_SPINLOCK(error_lock);
static void prepare_error_buf(const char *fmt, va_list args)
@@ -244,11 +244,11 @@ static void prepare_error_buf(const char *fmt, va_list args)
}
/* in addition to the usual conversion specifiers this accepts reiserfs-
specific conversion specifiers:
%k to print a little endian key,
%K to print a cpu key,
%h to print an item_head,
%t to print a directory entry,
%z to print a block head (arg must be struct buffer_head *),
%b to print a buffer_head
*/
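
A self-contained miniature of that facility, implementing just a %k-style custom specifier by hand (the struct layout and names are assumptions for illustration):

#include <stdarg.h>
#include <stdio.h>

/* Minimal model of a reiserfs_warning-style formatter: scan the format
 * for the custom %k specifier, consume a struct pointer for it, and
 * handle ordinary specifiers inline (only %d and %s shown). */
struct toy_key {
	unsigned long dir_id, objectid, offset, uniqueness;
};

static void toy_warning(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	for (const char *p = fmt; *p; p++) {
		if (*p != '%' || !p[1]) { putchar(*p); continue; }
		switch (*++p) {
		case 'k': {                      /* custom: print a key */
			const struct toy_key *k = va_arg(args, const struct toy_key *);
			printf("[%lu %lu %lu %lu]", k->dir_id, k->objectid,
			       k->offset, k->uniqueness);
			break;
		}
		case 'd': printf("%d", va_arg(args, int)); break;
		case 's': printf("%s", va_arg(args, const char *)); break;
		default:  putchar(*p); break;
		}
	}
	va_end(args);
}

int main(void)
{
	struct toy_key key = { 1, 2, 3, 4 };
	toy_warning("bad key %k in block %d\n", &key, 77);
	return 0;
}
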
@@ -314,17 +314,17 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
maintainer-errorid. Don't bother with reusing errorids, there are
lots of numbers out there.
Example:
reiserfs_panic(
p_sb, "reiser-29: reiserfs_new_blocknrs: "
"one of search_start or rn(%d) is equal to MAX_B_NUM,"
"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
rn, bh
);
Regular panic()s sometimes clear the screen before the message can
be read, thus the need for the while loop.
The numbering scheme for panics used by Vladimir and Anatoly (Hans completely ignores this scheme, and considers it
pointless complexity):
...
@@ -633,7 +633,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start,
*
*/
/*
* Make Linus happy.
* Local variables:
* c-indentation-style: "K&R"
...
/*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
/*
* Written by Alexander Zarochentcev.
*
* The kernel part of the (on-line) reiserfs resizer.
@@ -101,7 +101,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
/* just in case vfree schedules on us, copy the new
** pointer into the journal struct before freeing the
** old one
*/
node_tmp = jb->bitmaps;
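
The pattern being defended here is "publish the new pointer before freeing the old one", so a concurrent reader never follows freed memory. A userspace sketch of the same ordering (names invented; plain free() stands in for vfree):

#include <stdlib.h>
#include <stdio.h>

/* Swap-then-free: readers of holder->data must never observe the old
 * pointer after it is freed, so the store happens first.  (The kernel
 * code worries about vfree() sleeping; free() keeps the same shape.) */
struct toy_holder { int *data; };

static void toy_replace(struct toy_holder *holder, int *fresh)
{
	int *stale = holder->data;  /* keep the old pointer aside   */
	holder->data = fresh;       /* publish the new one first    */
	free(stale);                /* only now release the old one */
}

int main(void)
{
	struct toy_holder h = { malloc(sizeof(int)) };
	*h.data = 1;
	int *fresh = malloc(sizeof(int));
	*fresh = 2;
	toy_replace(&h, fresh);
	printf("%d\n", *h.data);    /* 2 */
	free(h.data);
	return 0;
}
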
...
@@ -77,7 +77,7 @@ inline void copy_item_head(struct item_head *p_v_to,
/* k1 is a pointer to an on-disk structure which is stored in little-endian
form. k2 is a pointer to a cpu variable. For keys of items of the same
object this returns 0.
Returns: -1 if key1 < key2
0 if key1 == key2
1 if key1 > key2 */
inline int comp_short_keys(const struct reiserfs_key *le_key,
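
Assuming the short key is the (dir_id, objectid) pair stored as two little-endian 32-bit words (consistent with the key fields printed elsewhere in this patch), a stand-alone model of the comparison:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Toy comp_short_keys: the on-disk key is little-endian, the in-memory
 * key is native (cpu) order; byte-swap the disk side before comparing.
 * Assumed short key layout: { dir_id, objectid } as two uint32_t. */
static int toy_comp_short_keys(const uint32_t le_key[2], const uint32_t cpu_key[2])
{
	for (int i = 0; i < 2; i++) {
		uint32_t k = le32toh(le_key[i]);
		if (k < cpu_key[i]) return -1;
		if (k > cpu_key[i]) return 1;
	}
	return 0;   /* same object: dir_id and objectid both match */
}

int main(void)
{
	uint32_t disk[2] = { htole32(5), htole32(9) };
	uint32_t cpu[2]  = { 5, 9 };
	printf("%d\n", toy_comp_short_keys(disk, cpu));  /* 0 */
	cpu[1] = 7;
	printf("%d\n", toy_comp_short_keys(disk, cpu));  /* 1: disk key larger */
	return 0;
}
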
@@ -890,7 +890,7 @@ static inline int prepare_for_direct_item(struct treepath *path,
}
// new file gets truncated
if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
//
round_len = ROUND_UP(new_file_length);
/* this was n_new_file_length < le_ih ... */
if (round_len < le_ih_k_offset(le_ih)) {
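
ROUND_UP here plausibly rounds the new file length up to a block boundary; that reading is an assumption. The arithmetic in isolation:

#include <stdio.h>

/* Toy ROUND_UP: round len up to the next multiple of blk (blk > 0).
 * The classic integer form: add blk-1, then truncate by division. */
static unsigned long toy_round_up(unsigned long len, unsigned long blk)
{
	return ((len + blk - 1) / blk) * blk;
}

int main(void)
{
	/* with 4096-byte blocks: 1 -> 4096, 4096 -> 4096, 4097 -> 8192 */
	printf("%lu %lu %lu\n", toy_round_up(1, 4096),
	       toy_round_up(4096, 4096), toy_round_up(4097, 4096));
	return 0;
}
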
@@ -1443,7 +1443,7 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
if (atomic_read(&p_s_inode->i_count) > 1 ||
!tail_has_to_be_packed(p_s_inode) ||
!page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
-// leave tail in an unformatted node
+/* leave tail in an unformatted node */
*p_c_mode = M_SKIP_BALANCING;
cut_bytes =
n_block_size - (n_new_file_size & (n_block_size - 1));
@@ -1826,7 +1826,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *p
/* While there are bytes to truncate and the previous file item is present in the tree. */
/*
** This loop could take a really long time, and could log
** many more blocks than a transaction can hold. So, we do a polite
** journal end here, and if the transaction needs ending, we make
** sure the file is consistent before ending the current trans
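
The shape of that polite journal end is: inside the long loop, close the transaction at a consistent point and open a fresh one whenever it nears capacity. A userspace model of the control flow (the threshold, counter, and names are invented for illustration):

#include <stdio.h>

/* Toy 'polite journal end': a long-running operation logs blocks, and
 * every time the open transaction nears its capacity it is committed
 * at a point where the (toy) file state is consistent, then reopened. */
#define TOY_TRANS_MAX 16

static int trans_len;                 /* blocks logged in the open trans */

static void toy_journal_end_begin(void)
{
	printf("commit %d blocks, start new transaction\n", trans_len);
	trans_len = 0;
}

static void toy_truncate(int blocks_to_log)
{
	for (int i = 0; i < blocks_to_log; i++) {
		trans_len++;                       /* log one block's change   */
		if (trans_len >= TOY_TRANS_MAX)    /* transaction needs ending */
			toy_journal_end_begin();   /* file is consistent here  */
	}
}

int main(void)
{
	toy_truncate(40);   /* commits twice along the way, 8 blocks remain */
	printf("%d blocks left in open transaction\n", trans_len);
	return 0;
}
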
...
@@ -758,7 +758,7 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
char **opt_arg, unsigned long *bit_flags)
{
char *p;
/* foo=bar,
   ^   ^  ^
   |   |  +-- option_end
   |   +-- arg_start
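
A stand-alone sketch of locating those boundaries in a mount-option token (the helper name is invented, and the real reiserfs_getopt also matches the name against an option table, which is omitted here):

#include <stdio.h>
#include <string.h>

/* Toy option scanner: given "foo=bar," find arg_start and option_end,
 * mirroring the diagram above.  Returns the option name's length. */
static int toy_getopt(const char *opt, const char **arg_start,
		      const char **option_end)
{
	const char *eq = strchr(opt, '=');
	const char *end = strchr(opt, ',');

	if (!end)
		end = opt + strlen(opt);      /* last option in the string */
	*arg_start = eq && eq < end ? eq + 1 : NULL;
	*option_end = end;
	return (int)((eq && eq < end ? eq : end) - opt);
}

int main(void)
{
	const char *arg, *end;
	int name_len = toy_getopt("foo=bar,baz", &arg, &end);
	printf("name_len=%d arg=%.*s\n", name_len, (int)(end - arg), arg);
	/* prints: name_len=3 arg=bar */
	return 0;
}
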
@@ -1348,7 +1348,7 @@ static int read_super_block(struct super_block *s, int offset)
}
//
// ok, reiserfs signature (old or new) found at the given offset
//
fs_blocksize = sb_blocksize(rs);
brelse(bh);
sb_set_blocksize(s, fs_blocksize);
@@ -1534,8 +1534,8 @@ static int what_hash(struct super_block *s)
code = find_hash_out(s);
if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
/* detection has found the hash, and we must check it against the
** mount options
*/
if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
reiserfs_warning(s, "reiserfs-2507",
@@ -1567,7 +1567,7 @@ static int what_hash(struct super_block *s)
}
}
/* if we are mounted RW, and we have a new valid hash code, update
** the super
*/
if (code != UNSET_HASH &&
...
@@ -46,7 +46,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
/* Set the key to search for the place for the new unfm pointer */
make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
-// FIXME: we could avoid this
+/* FIXME: we could avoid this */
if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
reiserfs_error(sb, "PAP-14030",
"pasted or inserted byte exists in "
...
@@ -14,7 +14,7 @@ typedef enum {
} reiserfs_super_block_flags;
/* struct reiserfs_super_block accessors/mutators:
* since this is a disk structure, it will always be in
* little-endian format. */
#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
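
A stand-alone model of that accessor pattern, using the C library's byte-order helpers in place of the kernel's le32_to_cpu/cpu_to_le32 (field and macro names shortened for illustration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Toy on-disk super block field: always stored little-endian, so every
 * access goes through a byte-swapping accessor/mutator pair, exactly
 * like the sb_block_count()/set_sb_block_count() macros above. */
struct toy_disk_sb {
	uint32_t s_block_count;   /* little-endian on every architecture */
};

#define toy_sb_block_count(sbp)       (le32toh((sbp)->s_block_count))
#define toy_set_sb_block_count(sbp,v) ((sbp)->s_block_count = htole32(v))

int main(void)
{
	struct toy_disk_sb sb;
	toy_set_sb_block_count(&sb, 123456);
	/* the raw bytes are fixed; the accessor hides the CPU's endianness */
	printf("%u\n", toy_sb_block_count(&sb));   /* 123456 */
	return 0;
}
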
@@ -83,16 +83,16 @@ typedef enum {
/* LOGGING -- */
/* These all interrelate for performance.
**
** If the journal block count is smaller than n transactions, you lose speed.
** I don't know what n is yet, I'm guessing 8-16.
**
** The typical transaction size depends on the application, how often fsync is
** called, and how many metadata blocks you dirty in a 30 second period.
** The more small files (<16k) you use, the larger your transactions will
** be.
**
** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
** to wrap, which slows things down. If you need high speed metadata updates, the journal should be big enough
** to prevent wrapping before dirty meta blocks get to disk.
@@ -242,7 +242,7 @@ struct reiserfs_journal {
struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
the transactions */
struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
int j_persistent_trans;
@@ -426,7 +426,7 @@ enum reiserfs_mount_options {
partition will be dealt with in a
manner of 3.5.x */
/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
** is not required. If the normal autodetection code can't determine which
** hash to use (because both hashes had the same value for a file)
...