Commit 2840c566 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull reiserfs and ext3 changes from Jan Kara:
 "Big reiserfs cleanup from Jeff, an ext3 deadlock fix, and some small
  cleanups"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (34 commits)
  reiserfs: Fix compilation breakage with CONFIG_REISERFS_CHECK
  ext3: Fix deadlock in data=journal mode when fs is frozen
  reiserfs: call truncate_setsize under tailpack mutex
  fs/jbd/revoke.c: replace shift loop by ilog2
  reiserfs: remove obsolete __constant_cpu_to_le32
  reiserfs: balance_leaf refactor, split up balance_leaf_when_delete
  reiserfs: balance_leaf refactor, format balance_leaf_finish_node
  reiserfs: balance_leaf refactor, format balance_leaf_new_nodes_paste
  reiserfs: balance_leaf refactor, format balance_leaf_paste_right
  reiserfs: balance_leaf refactor, format balance_leaf_insert_right
  reiserfs: balance_leaf refactor, format balance_leaf_paste_left
  reiserfs: balance_leaf refactor, format balance_leaf_insert_left
  reiserfs: balance_leaf refactor, pull out balance_leaf{left, right, new_nodes, finish_node}
  reiserfs: balance_leaf refactor, pull out balance_leaf_finish_node_paste
  reiserfs: balance_leaf refactor pull out balance_leaf_finish_node_insert
  reiserfs: balance_leaf refactor, pull out balance_leaf_new_nodes_paste
  reiserfs: balance_leaf refactor, pull out balance_leaf_new_nodes_insert
  reiserfs: balance_leaf refactor, pull out balance_leaf_paste_right
  reiserfs: balance_leaf refactor, pull out balance_leaf_insert_right
  reiserfs: balance_leaf refactor, pull out balance_leaf_paste_left
  ...
parents 859862dd 19ef1229
......@@ -1716,17 +1716,17 @@ static int ext3_journalled_writepage(struct page *page,
WARN_ON_ONCE(IS_RDONLY(inode) &&
!(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
trace_ext3_journalled_writepage(page);
if (!page_has_buffers(page) || PageChecked(page)) {
if (ext3_journal_current_handle())
goto no_write;
trace_ext3_journalled_writepage(page);
handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
handle = ext3_journal_start(inode,
ext3_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto no_write;
}
if (!page_has_buffers(page) || PageChecked(page)) {
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
......@@ -1749,17 +1749,18 @@ static int ext3_journalled_writepage(struct page *page,
atomic_set(&EXT3_I(inode)->i_datasync_tid,
handle->h_transaction->t_tid);
unlock_page(page);
err = ext3_journal_stop(handle);
if (!ret)
ret = err;
} else {
/*
* It may be a page full of checkpoint-mode buffers. We don't
* really know unless we go poke around in the buffer_heads.
* But block_write_full_page will do the right thing.
* It is a page full of checkpoint-mode buffers. Go and write
* them. They should have been already mapped when they went
* to the journal so provide NULL get_block function to catch
* errors.
*/
ret = block_write_full_page(page, ext3_get_block, wbc);
ret = block_write_full_page(page, NULL, wbc);
}
err = ext3_journal_stop(handle);
if (!ret)
ret = err;
out:
return ret;
......
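Because this rendering drops the +/- markers, the reordered flow in the hunk above is hard to see: the journal handle is now started before the page buffers are examined, and the checkpoint-buffer branch passes a NULL get_block since those buffers were already mapped when they were journalled. A condensed paraphrase of the new flow (error labels and the mmapped-pagecache body are trimmed, so this is not a compilable excerpt — read it next to the diff above):

	if (ext3_journal_current_handle())
		goto no_write;

	/* the handle is now started before the page buffers are examined */
	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/* mmapped pagecache: add buffers and journal the data */
	} else {
		/*
		 * checkpoint-mode buffers were mapped when they were
		 * journalled, so a NULL get_block catches anything unmapped
		 */
		ret = block_write_full_page(page, NULL, wbc);
	}

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;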
......@@ -231,19 +231,15 @@ int __init journal_init_revoke_caches(void)
static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
{
int shift = 0;
int tmp = hash_size;
int i;
struct jbd_revoke_table_s *table;
table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
if (!table)
goto out;
while((tmp >>= 1UL) != 0UL)
shift++;
table->hash_size = hash_size;
table->hash_shift = shift;
table->hash_shift = ilog2(hash_size);
table->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!table->hash_table) {
......@@ -252,8 +248,8 @@ static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
goto out;
}
for (tmp = 0; tmp < hash_size; tmp++)
INIT_LIST_HEAD(&table->hash_table[tmp]);
for (i = 0; i < hash_size; i++)
INIT_LIST_HEAD(&table->hash_table[i]);
out:
return table;
......
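The revoke-table change above swaps an open-coded shift loop for ilog2(); both compute floor(log2(hash_size)), so the stored hash_shift is unchanged. A small userspace check of that equivalence (ilog2 is approximated with __builtin_clz here, which is an assumption; the kernel's ilog2() lives in <linux/log2.h>):

#include <assert.h>
#include <stdio.h>

/* userspace stand-in for the kernel's ilog2(): floor(log2(n)) for n >= 1 */
static int ilog2_u32(unsigned int n)
{
	return 31 - __builtin_clz(n);
}

int main(void)
{
	for (unsigned int n = 1; n <= (1u << 20); n++) {
		/* the shift loop that the patch removes */
		int shift = 0;
		unsigned int tmp = n;

		while ((tmp >>= 1) != 0)
			shift++;

		assert(shift == ilog2_u32(n));
	}
	printf("shift loop and ilog2 agree for 1..2^20\n");
	return 0;
}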
......@@ -59,7 +59,10 @@ static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *d
int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
{
struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
/* key of current position in the directory (key of directory entry) */
struct cpu_key pos_key;
INITIALIZE_PATH(path_to_entry);
struct buffer_head *bh;
int item_num, entry_num;
......@@ -77,21 +80,28 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
reiserfs_check_lock_depth(inode->i_sb, "readdir");
/* form key for search the next directory entry using f_pos field of
file structure */
/*
* form key for search the next directory entry using
* f_pos field of file structure
*/
make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
next_pos = cpu_key_k_offset(&pos_key);
path_to_entry.reada = PATH_READA;
while (1) {
research:
/* search the directory item, containing entry with specified key */
research:
/*
* search the directory item, containing entry with
* specified key
*/
search_res =
search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry,
&de);
if (search_res == IO_ERROR) {
// FIXME: we could just skip part of directory which could
// not be read
/*
* FIXME: we could just skip part of directory
* which could not be read
*/
ret = -EIO;
goto out;
}
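The make_cpu_key() call above uses GCC's binary conditional extension (ctx->pos ?: DOT_OFFSET): it yields ctx->pos when that is non-zero and DOT_OFFSET otherwise, evaluating the first operand only once. A minimal standalone illustration (the DOT_OFFSET value of 1 matches reiserfs' "." entry offset, but treat it as illustrative):

#include <stdio.h>

#define DOT_OFFSET 1	/* illustrative; the real value lives in reiserfs.h */

static long long pick_offset(long long pos)
{
	return pos ?: DOT_OFFSET;	/* same as: pos ? pos : DOT_OFFSET */
}

int main(void)
{
	printf("%lld %lld\n", pick_offset(0), pick_offset(42));	/* prints: 1 42 */
	return 0;
}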
......@@ -102,41 +112,49 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
store_ih(&tmp_ih, ih);
/* we must have found item, that is item of this directory, */
RFALSE(COMP_SHORT_KEYS(&(ih->ih_key), &pos_key),
RFALSE(COMP_SHORT_KEYS(&ih->ih_key, &pos_key),
"vs-9000: found item %h does not match to dir we readdir %K",
ih, &pos_key);
RFALSE(item_num > B_NR_ITEMS(bh) - 1,
"vs-9005 item_num == %d, item amount == %d",
item_num, B_NR_ITEMS(bh));
/* and entry must be not more than number of entries in the item */
RFALSE(I_ENTRY_COUNT(ih) < entry_num,
/*
* and entry must be not more than number of entries
* in the item
*/
RFALSE(ih_entry_count(ih) < entry_num,
"vs-9010: entry number is too big %d (%d)",
entry_num, I_ENTRY_COUNT(ih));
entry_num, ih_entry_count(ih));
/*
* go through all entries in the directory item beginning
* from the entry, that has been found
*/
if (search_res == POSITION_FOUND
|| entry_num < I_ENTRY_COUNT(ih)) {
/* go through all entries in the directory item beginning from the entry, that has been found */
|| entry_num < ih_entry_count(ih)) {
struct reiserfs_de_head *deh =
B_I_DEH(bh, ih) + entry_num;
for (; entry_num < I_ENTRY_COUNT(ih);
for (; entry_num < ih_entry_count(ih);
entry_num++, deh++) {
int d_reclen;
char *d_name;
ino_t d_ino;
loff_t cur_pos = deh_offset(deh);
if (!de_visible(deh))
/* it is hidden entry */
if (!de_visible(deh))
continue;
d_reclen = entry_length(bh, ih, entry_num);
d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
if (d_reclen <= 0 ||
d_name + d_reclen > bh->b_data + bh->b_size) {
/* There is corrupted data in entry,
* We'd better stop here */
/*
* There is corrupted data in entry,
* We'd better stop here
*/
pathrelse(&path_to_entry);
ret = -EIO;
goto out;
......@@ -145,10 +163,10 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
if (!d_name[d_reclen - 1])
d_reclen = strlen(d_name);
/* too big to send back to VFS */
if (d_reclen >
REISERFS_MAX_NAME(inode->i_sb->
s_blocksize)) {
/* too big to send back to VFS */
continue;
}
......@@ -173,10 +191,14 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
goto research;
}
}
// Note, that we copy name to user space via temporary
// buffer (local_buf) because filldir will block if
// user space buffer is swapped out. At that time
// entry can move to somewhere else
/*
* Note, that we copy name to user space via
* temporary buffer (local_buf) because
* filldir will block if user space buffer is
* swapped out. At that time entry can move to
* somewhere else
*/
memcpy(local_buf, d_name, d_reclen);
/*
......@@ -209,22 +231,26 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
} /* for */
}
/* end of directory has been reached */
if (item_num != B_NR_ITEMS(bh) - 1)
// end of directory has been reached
goto end;
/* item we went through is last item of node. Using right
delimiting key check is it directory end */
/*
* item we went through is last item of node. Using right
* delimiting key check is it directory end
*/
rkey = get_rkey(&path_to_entry, inode->i_sb);
if (!comp_le_keys(rkey, &MIN_KEY)) {
/* set pos_key to key, that is the smallest and greater
that key of the last entry in the item */
/*
* set pos_key to key, that is the smallest and greater
* that key of the last entry in the item
*/
set_cpu_key_k_offset(&pos_key, next_pos);
continue;
}
/* end of directory has been reached */
if (COMP_SHORT_KEYS(rkey, &pos_key)) {
// end of directory has been reached
goto end;
}
......@@ -248,71 +274,73 @@ static int reiserfs_readdir(struct file *file, struct dir_context *ctx)
return reiserfs_readdir_inode(file_inode(file), ctx);
}
/* compose directory item containing "." and ".." entries (entries are
not aligned to 4 byte boundary) */
/* the last four params are LE */
/*
* compose directory item containing "." and ".." entries (entries are
* not aligned to 4 byte boundary)
*/
void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
__le32 par_dirid, __le32 par_objid)
{
struct reiserfs_de_head *deh;
struct reiserfs_de_head *dot, *dotdot;
memset(body, 0, EMPTY_DIR_SIZE_V1);
deh = (struct reiserfs_de_head *)body;
dot = (struct reiserfs_de_head *)body;
dotdot = dot + 1;
/* direntry header of "." */
put_deh_offset(&(deh[0]), DOT_OFFSET);
put_deh_offset(dot, DOT_OFFSET);
/* these two are from make_le_item_head, and are are LE */
deh[0].deh_dir_id = dirid;
deh[0].deh_objectid = objid;
deh[0].deh_state = 0; /* Endian safe if 0 */
put_deh_location(&(deh[0]), EMPTY_DIR_SIZE_V1 - strlen("."));
mark_de_visible(&(deh[0]));
dot->deh_dir_id = dirid;
dot->deh_objectid = objid;
dot->deh_state = 0; /* Endian safe if 0 */
put_deh_location(dot, EMPTY_DIR_SIZE_V1 - strlen("."));
mark_de_visible(dot);
/* direntry header of ".." */
put_deh_offset(&(deh[1]), DOT_DOT_OFFSET);
put_deh_offset(dotdot, DOT_DOT_OFFSET);
/* key of ".." for the root directory */
/* these two are from the inode, and are are LE */
deh[1].deh_dir_id = par_dirid;
deh[1].deh_objectid = par_objid;
deh[1].deh_state = 0; /* Endian safe if 0 */
put_deh_location(&(deh[1]), deh_location(&(deh[0])) - strlen(".."));
mark_de_visible(&(deh[1]));
dotdot->deh_dir_id = par_dirid;
dotdot->deh_objectid = par_objid;
dotdot->deh_state = 0; /* Endian safe if 0 */
put_deh_location(dotdot, deh_location(dot) - strlen(".."));
mark_de_visible(dotdot);
/* copy ".." and "." */
memcpy(body + deh_location(&(deh[0])), ".", 1);
memcpy(body + deh_location(&(deh[1])), "..", 2);
memcpy(body + deh_location(dot), ".", 1);
memcpy(body + deh_location(dotdot), "..", 2);
}
/* compose directory item containing "." and ".." entries */
void make_empty_dir_item(char *body, __le32 dirid, __le32 objid,
__le32 par_dirid, __le32 par_objid)
{
struct reiserfs_de_head *deh;
struct reiserfs_de_head *dot, *dotdot;
memset(body, 0, EMPTY_DIR_SIZE);
deh = (struct reiserfs_de_head *)body;
dot = (struct reiserfs_de_head *)body;
dotdot = dot + 1;
/* direntry header of "." */
put_deh_offset(&(deh[0]), DOT_OFFSET);
put_deh_offset(dot, DOT_OFFSET);
/* these two are from make_le_item_head, and are are LE */
deh[0].deh_dir_id = dirid;
deh[0].deh_objectid = objid;
deh[0].deh_state = 0; /* Endian safe if 0 */
put_deh_location(&(deh[0]), EMPTY_DIR_SIZE - ROUND_UP(strlen(".")));
mark_de_visible(&(deh[0]));
dot->deh_dir_id = dirid;
dot->deh_objectid = objid;
dot->deh_state = 0; /* Endian safe if 0 */
put_deh_location(dot, EMPTY_DIR_SIZE - ROUND_UP(strlen(".")));
mark_de_visible(dot);
/* direntry header of ".." */
put_deh_offset(&(deh[1]), DOT_DOT_OFFSET);
put_deh_offset(dotdot, DOT_DOT_OFFSET);
/* key of ".." for the root directory */
/* these two are from the inode, and are are LE */
deh[1].deh_dir_id = par_dirid;
deh[1].deh_objectid = par_objid;
deh[1].deh_state = 0; /* Endian safe if 0 */
put_deh_location(&(deh[1]),
deh_location(&(deh[0])) - ROUND_UP(strlen("..")));
mark_de_visible(&(deh[1]));
dotdot->deh_dir_id = par_dirid;
dotdot->deh_objectid = par_objid;
dotdot->deh_state = 0; /* Endian safe if 0 */
put_deh_location(dotdot, deh_location(dot) - ROUND_UP(strlen("..")));
mark_de_visible(dotdot);
/* copy ".." and "." */
memcpy(body + deh_location(&(deh[0])), ".", 1);
memcpy(body + deh_location(&(deh[1])), "..", 2);
memcpy(body + deh_location(dot), ".", 1);
memcpy(body + deh_location(dotdot), "..", 2);
}
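Both helpers above lay the item out with the two entry headers at the front and the names packed at the tail, so each deh_location is computed backwards from the item size ("." first, ".." immediately before it; the v2 variant additionally rounds the name lengths up with ROUND_UP). A tiny userspace model of that arithmetic — DEH_SIZE and the empty-item size are assumptions, not the real reiserfs constants:

#include <stdio.h>
#include <string.h>

#define DEH_SIZE           16	/* assumed sizeof(struct reiserfs_de_head) */
#define EMPTY_DIR_SIZE_V1  (2 * DEH_SIZE + 1 + 2)	/* two headers + "." + ".." */

int main(void)
{
	size_t dot_loc    = EMPTY_DIR_SIZE_V1 - strlen(".");
	size_t dotdot_loc = dot_loc - strlen("..");

	/* headers at the front of the item, names packed at its tail */
	printf("item size %d: '.' header at 0, name at %zu\n",
	       EMPTY_DIR_SIZE_V1, dot_loc);
	printf("             '..' header at %d, name at %zu\n",
	       DEH_SIZE, dotdot_loc);
	return 0;
}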
......@@ -15,20 +15,20 @@
#include <linux/quotaops.h>
/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed. If
** this is the last open file descriptor, and the file
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
* We pack the tails of files on file close, not at the time they are written.
* This implies an unnecessary copy of the tail and an unnecessary indirect item
* insertion/balancing, for files that are written in one write.
* It avoids unnecessary tail packings (balances) for files that are written in
* multiple writes and are small enough to have tails.
*
* file_release is called by the VFS layer when the file is closed. If
* this is the last open file descriptor, and the file
* small enough to have a tail, and the tail is currently in an
* unformatted node, the tail is converted back into a direct item.
*
* We use reiserfs_truncate_file to pack the tail, since it already has
* all the conditions coded.
*/
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
......@@ -41,10 +41,10 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
return 0;
mutex_lock(&(REISERFS_I(inode)->tailpack));
mutex_lock(&REISERFS_I(inode)->tailpack);
if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
mutex_unlock(&(REISERFS_I(inode)->tailpack));
mutex_unlock(&REISERFS_I(inode)->tailpack);
return 0;
}
......@@ -52,31 +52,35 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
!tail_has_to_be_packed(inode)) &&
REISERFS_I(inode)->i_prealloc_count <= 0) {
mutex_unlock(&(REISERFS_I(inode)->tailpack));
mutex_unlock(&REISERFS_I(inode)->tailpack);
return 0;
}
reiserfs_write_lock(inode->i_sb);
/* freeing preallocation only involves relogging blocks that
/*
* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for
* us to log any additional blocks (including quota blocks)
*/
err = journal_begin(&th, inode->i_sb, 1);
if (err) {
/* uh oh, we can't allow the inode to go away while there
/*
* uh oh, we can't allow the inode to go away while there
* is still preallocation blocks pending. Try to join the
* aborted transaction
*/
jbegin_failure = err;
err = journal_join_abort(&th, inode->i_sb, 1);
err = journal_join_abort(&th, inode->i_sb);
if (err) {
/* hmpf, our choices here aren't good. We can pin the inode
* which will disallow unmount from every happening, we can
* do nothing, which will corrupt random memory on unmount,
* or we can forcibly remove the file from the preallocation
* list, which will leak blocks on disk. Lets pin the inode
/*
* hmpf, our choices here aren't good. We can pin
* the inode which will disallow unmount from ever
* happening, we can do nothing, which will corrupt
* random memory on unmount, or we can forcibly
* remove the file from the preallocation list, which
* will leak blocks on disk. Lets pin the inode
* and let the admin know what is going on.
*/
igrab(inode);
......@@ -92,7 +96,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
#ifdef REISERFS_PREALLOCATE
reiserfs_discard_prealloc(&th, inode);
#endif
err = journal_end(&th, inode->i_sb, 1);
err = journal_end(&th);
/* copy back the error code from journal_begin */
if (!err)
......@@ -102,35 +106,38 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
tail_has_to_be_packed(inode)) {
/* if regular file is released by last holder and it has been
appended (we append by unformatted node only) or its direct
item(s) had to be converted, then it may have to be
indirect2direct converted */
/*
* if regular file is released by last holder and it has been
* appended (we append by unformatted node only) or its direct
* item(s) had to be converted, then it may have to be
* indirect2direct converted
*/
err = reiserfs_truncate_file(inode, 0);
}
out:
out:
reiserfs_write_unlock(inode->i_sb);
mutex_unlock(&(REISERFS_I(inode)->tailpack));
mutex_unlock(&REISERFS_I(inode)->tailpack);
return err;
}
static int reiserfs_file_open(struct inode *inode, struct file *file)
{
int err = dquot_file_open(inode, file);
if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
/* somebody might be tailpacking on final close; wait for it */
mutex_lock(&(REISERFS_I(inode)->tailpack));
if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
mutex_lock(&REISERFS_I(inode)->tailpack);
atomic_inc(&REISERFS_I(inode)->openers);
mutex_unlock(&(REISERFS_I(inode)->tailpack));
mutex_unlock(&REISERFS_I(inode)->tailpack);
}
return err;
}
void reiserfs_vfs_truncate_file(struct inode *inode)
{
mutex_lock(&(REISERFS_I(inode)->tailpack));
mutex_lock(&REISERFS_I(inode)->tailpack);
reiserfs_truncate_file(inode, 1);
mutex_unlock(&(REISERFS_I(inode)->tailpack));
mutex_unlock(&REISERFS_I(inode)->tailpack);
}
/* Sync a reiserfs file. */
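reiserfs_file_release() and reiserfs_file_open() above cooperate through the openers counter and the tailpack mutex: release only takes the mutex when it may be the last closer, and open falls back to the mutex when the count has already hit zero so it waits for a tail-pack racing with it. A userspace model of that pattern with C11 atomics and pthreads (purely illustrative, not reiserfs code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int openers;	/* 0 until the first opener shows up */
static pthread_mutex_t tailpack = PTHREAD_MUTEX_INITIALIZER;

/* like the kernel's atomic_add_unless(v, a, u): add a unless value == u */
static bool add_unless(atomic_int *v, int a, int u)
{
	int cur = atomic_load(v);

	while (cur != u) {
		if (atomic_compare_exchange_weak(v, &cur, cur + a))
			return true;
	}
	return false;
}

static void file_open(void)
{
	if (!add_unless(&openers, 1, 0)) {	/* atomic_inc_not_zero() */
		/* somebody may be tail-packing on final close; wait for it */
		pthread_mutex_lock(&tailpack);
		atomic_fetch_add(&openers, 1);
		pthread_mutex_unlock(&tailpack);
	}
}

static void file_release(void)
{
	if (add_unless(&openers, -1, 1))	/* fast path: not the last closer */
		return;

	pthread_mutex_lock(&tailpack);
	if (atomic_fetch_sub(&openers, 1) != 1) {
		/* a new opener slipped in; packing is no longer our job */
		pthread_mutex_unlock(&tailpack);
		return;
	}
	puts("last closer: pack the tail while holding the mutex");
	pthread_mutex_unlock(&tailpack);
}

int main(void)
{
	file_open();		/* first opener: slow path through the mutex */
	file_open();		/* second opener: lock-free fast path */
	file_release();		/* not the last closer: fast path */
	file_release();		/* last closer: packs the tail under the mutex */
	return 0;
}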
......@@ -205,10 +212,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
set_buffer_uptodate(bh);
if (logit) {
reiserfs_prepare_for_journal(s, bh, 1);
journal_mark_dirty(&th, s, bh);
journal_mark_dirty(&th, bh);
} else if (!buffer_dirty(bh)) {
mark_buffer_dirty(bh);
/* do data=ordered on any page past the end
/*
* do data=ordered on any page past the end
* of file and any buffer marked BH_New.
*/
if (reiserfs_data_ordered(inode->i_sb) &&
......@@ -219,8 +227,8 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
}
}
if (logit) {
ret = journal_end(&th, s, bh_per_page + 1);
drop_write_lock:
ret = journal_end(&th);
drop_write_lock:
reiserfs_write_unlock(s);
}
/*
......
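One change repeated throughout this series shows up in the call sites above: the reiserfs journal helpers no longer take the superblock and block-count arguments (journal_end(&th, s, n) becomes journal_end(&th), journal_mark_dirty(&th, s, bh) becomes journal_mark_dirty(&th, bh)), since the transaction handle already records both. A sketch of the apparent prototype change, inferred from these call sites — the real declarations live in reiserfs' private headers and may differ in detail:

struct reiserfs_transaction_handle;
struct super_block;
struct buffer_head;

/* before this series (as the old call sites suggest):
 * int journal_end(struct reiserfs_transaction_handle *th,
 *                 struct super_block *sb, unsigned long nblocks);
 * int journal_mark_dirty(struct reiserfs_transaction_handle *th,
 *                        struct super_block *sb, struct buffer_head *bh);
 */

/* after: the handle remembers its superblock and reserved block count */
int journal_end(struct reiserfs_transaction_handle *th);
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct buffer_head *bh);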
......@@ -12,12 +12,6 @@
* Yura's function is added (04/07/2000)
*/
//
// keyed_hash
// yura_hash
// r5_hash
//
#include <linux/kernel.h>
#include "reiserfs.h"
#include <asm/types.h>
......@@ -56,7 +50,7 @@ u32 keyed_hash(const signed char *msg, int len)
u32 pad;
int i;
// assert(len >= 0 && len < 256);
/* assert(len >= 0 && len < 256); */
pad = (u32) len | ((u32) len << 8);
pad |= pad << 16;
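The two pad lines above replicate the length byte into all four bytes of pad (the asserted range is 0..255, so len = 5 gives 0x05050505). A quick standalone check of that computation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int len = 5;	/* any length in 0..255, per the assert above */
	uint32_t pad = (uint32_t)len | ((uint32_t)len << 8);

	pad |= pad << 16;
	printf("0x%08x\n", pad);	/* prints 0x05050505 */
	return 0;
}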
......@@ -127,9 +121,10 @@ u32 keyed_hash(const signed char *msg, int len)
return h0 ^ h1;
}
/* What follows in this file is copyright 2000 by Hans Reiser, and the
* licensing of what follows is governed by reiserfs/README */
/*
* What follows in this file is copyright 2000 by Hans Reiser, and the
* licensing of what follows is governed by reiserfs/README
*/
u32 yura_hash(const signed char *msg, int len)
{
int j, pow;
......
......@@ -15,7 +15,8 @@
* reiserfs_ioctl - handler for ioctl for inode
* supported commands:
* 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
* and prevent packing file (argument arg has to be non-zero)
* and prevent packing file (argument arg has to
* be non-zero)
* 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
* 3) That's all for a while ...
*/
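For completeness, the unpack command documented above is driven from user space with a plain ioctl on an open descriptor. A minimal sketch follows; the _IOW(0xCD, 1, long) value mirrors the REISERFS_IOC_UNPACK definition of this era but should be treated as an assumption and checked against the headers you build with:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef REISERFS_IOC_UNPACK
#define REISERFS_IOC_UNPACK _IOW(0xCD, 1, long)	/* assumed encoding */
#endif

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file on reiserfs>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* non-zero argument: unpack the tail and keep the file unpacked */
	if (ioctl(fd, REISERFS_IOC_UNPACK, 1) < 0)
		perror("REISERFS_IOC_UNPACK");
	close(fd);
	return 0;
}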
......@@ -132,7 +133,10 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
/* These are just misnamed, they actually get/put from/to user an int */
/*
* These are just misnamed, they actually
* get/put from/to user an int
*/
switch (cmd) {
case REISERFS_IOC32_UNPACK:
cmd = REISERFS_IOC_UNPACK;
......@@ -160,10 +164,10 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
/*
** reiserfs_unpack
** Function try to convert tail from direct item into indirect.
** It set up nopack attribute in the REISERFS_I(inode)->nopack
*/
* reiserfs_unpack
* Function try to convert tail from direct item into indirect.
* It set up nopack attribute in the REISERFS_I(inode)->nopack
*/
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
......@@ -194,9 +198,10 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
goto out;
}
/* we unpack by finding the page with the tail, and calling
** __reiserfs_write_begin on that page. This will force a
** reiserfs_get_block to unpack the tail for us.
/*
* we unpack by finding the page with the tail, and calling
* __reiserfs_write_begin on that page. This will force a
* reiserfs_get_block to unpack the tail for us.
*/
index = inode->i_size >> PAGE_CACHE_SHIFT;
mapping = inode->i_mapping;
......@@ -214,11 +219,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
retval = reiserfs_commit_write(NULL, page, write_from, write_from);
REISERFS_I(inode)->i_flags |= i_nopack_mask;
out_unlock:
out_unlock:
unlock_page(page);
page_cache_release(page);
out:
out:
mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return retval;
......
......@@ -5,15 +5,17 @@
#include <linux/time.h>
#include "reiserfs.h"
// this contains item handlers for old item types: sd, direct,
// indirect, directory
/*
* this contains item handlers for old item types: sd, direct,
* indirect, directory
*/
/* and where are the comments? how about saying where we can find an
explanation of each item handler method? -Hans */
/*
* and where are the comments? how about saying where we can find an
* explanation of each item handler method? -Hans
*/
//////////////////////////////////////////////////////////////////////////////
// stat data functions
//
/* stat data functions */
static int sd_bytes_number(struct item_head *ih, int block_size)
{
return 0;
......@@ -60,7 +62,7 @@ static void sd_print_item(struct item_head *ih, char *item)
static void sd_check_item(struct item_head *ih, char *item)
{
// FIXME: type something here!
/* unused */
}
static int sd_create_vi(struct virtual_node *vn,
......@@ -68,7 +70,6 @@ static int sd_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_STAT_DATA;
//vi->vi_type |= VI_TYPE_STAT_DATA;// not needed?
return 0;
}
......@@ -117,15 +118,13 @@ static struct item_operations stat_data_ops = {
.print_vi = sd_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// direct item functions
//
/* direct item functions */
static int direct_bytes_number(struct item_head *ih, int block_size)
{
return ih_item_len(ih);
}
// FIXME: this should probably switch to indirect as well
/* FIXME: this should probably switch to indirect as well */
static void direct_decrement_key(struct cpu_key *key)
{
cpu_key_k_offset_dec(key);
......@@ -144,7 +143,7 @@ static void direct_print_item(struct item_head *ih, char *item)
{
int j = 0;
// return;
/* return; */
printk("\"");
while (j < ih_item_len(ih))
printk("%c", item[j++]);
......@@ -153,7 +152,7 @@ static void direct_print_item(struct item_head *ih, char *item)
static void direct_check_item(struct item_head *ih, char *item)
{
// FIXME: type something here!
/* unused */
}
static int direct_create_vi(struct virtual_node *vn,
......@@ -161,7 +160,6 @@ static int direct_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_DIRECT;
//vi->vi_type |= VI_TYPE_DIRECT;
return 0;
}
......@@ -211,16 +209,13 @@ static struct item_operations direct_ops = {
.print_vi = direct_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// indirect item functions
//
/* indirect item functions */
static int indirect_bytes_number(struct item_head *ih, int block_size)
{
return ih_item_len(ih) / UNFM_P_SIZE * block_size; //- get_ih_free_space (ih);
return ih_item_len(ih) / UNFM_P_SIZE * block_size;
}
// decrease offset, if it becomes 0, change type to stat data
/* decrease offset, if it becomes 0, change type to stat data */
static void indirect_decrement_key(struct cpu_key *key)
{
cpu_key_k_offset_dec(key);
......@@ -228,7 +223,7 @@ static void indirect_decrement_key(struct cpu_key *key)
set_cpu_key_k_type(key, TYPE_STAT_DATA);
}
// if it is not first item of the body, then it is mergeable
/* if it is not first item of the body, then it is mergeable */
static int indirect_is_left_mergeable(struct reiserfs_key *key,
unsigned long bsize)
{
......@@ -236,7 +231,7 @@ static int indirect_is_left_mergeable(struct reiserfs_key *key,
return (le_key_k_offset(version, key) != 1);
}
// printing of indirect item
/* printing of indirect item */
static void start_new_sequence(__u32 * start, int *len, __u32 new)
{
*start = new;
......@@ -295,7 +290,7 @@ static void indirect_print_item(struct item_head *ih, char *item)
static void indirect_check_item(struct item_head *ih, char *item)
{
// FIXME: type something here!
/* unused */
}
static int indirect_create_vi(struct virtual_node *vn,
......@@ -303,7 +298,6 @@ static int indirect_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_INDIRECT;
//vi->vi_type |= VI_TYPE_INDIRECT;
return 0;
}
......@@ -321,16 +315,19 @@ static int indirect_check_right(struct virtual_item *vi, int free)
return indirect_check_left(vi, free, 0, 0);
}
// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
/*
* return size in bytes of 'units' units. If first == 0 - calculate
* from the head (left), otherwise - from tail (right)
*/
static int indirect_part_size(struct virtual_item *vi, int first, int units)
{
// unit of indirect item is byte (yet)
/* unit of indirect item is byte (yet) */
return units;
}
static int indirect_unit_num(struct virtual_item *vi)
{
// unit of indirect item is byte (yet)
/* unit of indirect item is byte (yet) */
return vi->vi_item_len - IH_SIZE;
}
......@@ -356,10 +353,7 @@ static struct item_operations indirect_ops = {
.print_vi = indirect_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// direntry functions
//
/* direntry functions */
static int direntry_bytes_number(struct item_head *ih, int block_size)
{
reiserfs_warning(NULL, "vs-16090",
......@@ -396,7 +390,7 @@ static void direntry_print_item(struct item_head *ih, char *item)
deh = (struct reiserfs_de_head *)item;
for (i = 0; i < I_ENTRY_COUNT(ih); i++, deh++) {
for (i = 0; i < ih_entry_count(ih); i++, deh++) {
namelen =
(i ? (deh_location(deh - 1)) : ih_item_len(ih)) -
deh_location(deh);
......@@ -428,9 +422,9 @@ static void direntry_check_item(struct item_head *ih, char *item)
int i;
struct reiserfs_de_head *deh;
// FIXME: type something here!
/* unused */
deh = (struct reiserfs_de_head *)item;
for (i = 0; i < I_ENTRY_COUNT(ih); i++, deh++) {
for (i = 0; i < ih_entry_count(ih); i++, deh++) {
;
}
}
......@@ -439,7 +433,8 @@ static void direntry_check_item(struct item_head *ih, char *item)
/*
* function returns old entry number in directory item in real node
* using new entry number in virtual item in virtual node */
* using new entry number in virtual item in virtual node
*/
static inline int old_entry_num(int is_affected, int virtual_entry_num,
int pos_in_item, int mode)
{
......@@ -463,9 +458,11 @@ static inline int old_entry_num(int is_affected, int virtual_entry_num,
return virtual_entry_num - 1;
}
/* Create an array of sizes of directory entries for virtual
item. Return space used by an item. FIXME: no control over
consuming of space used by this item handler */
/*
* Create an array of sizes of directory entries for virtual
* item. Return space used by an item. FIXME: no control over
* consuming of space used by this item handler
*/
static int direntry_create_vi(struct virtual_node *vn,
struct virtual_item *vi,
int is_affected, int insert_size)
......@@ -494,8 +491,8 @@ static int direntry_create_vi(struct virtual_node *vn,
j = old_entry_num(is_affected, i, vn->vn_pos_in_item,
vn->vn_mode);
dir_u->entry_sizes[i] =
(j ? deh_location(&(deh[j - 1])) : ih_item_len(vi->vi_ih)) -
deh_location(&(deh[j])) + DEH_SIZE;
(j ? deh_location(&deh[j - 1]) : ih_item_len(vi->vi_ih)) -
deh_location(&deh[j]) + DEH_SIZE;
}
size += (dir_u->entry_count * sizeof(short));
......@@ -529,10 +526,10 @@ static int direntry_create_vi(struct virtual_node *vn,
}
//
// return number of entries which may fit into specified amount of
// free space, or -1 if free space is not enough even for 1 entry
//
/*
* return number of entries which may fit into specified amount of
* free space, or -1 if free space is not enough even for 1 entry
*/
static int direntry_check_left(struct virtual_item *vi, int free,
int start_skip, int end_skip)
{
......@@ -541,8 +538,8 @@ static int direntry_check_left(struct virtual_item *vi, int free,
struct direntry_uarea *dir_u = vi->vi_uarea;
for (i = start_skip; i < dir_u->entry_count - end_skip; i++) {
if (dir_u->entry_sizes[i] > free)
/* i-th entry doesn't fit into the remaining free space */
if (dir_u->entry_sizes[i] > free)
break;
free -= dir_u->entry_sizes[i];
......@@ -570,8 +567,8 @@ static int direntry_check_right(struct virtual_item *vi, int free)
struct direntry_uarea *dir_u = vi->vi_uarea;
for (i = dir_u->entry_count - 1; i >= 0; i--) {
if (dir_u->entry_sizes[i] > free)
/* i-th entry doesn't fit into the remaining free space */
if (dir_u->entry_sizes[i] > free)
break;
free -= dir_u->entry_sizes[i];
......@@ -643,9 +640,7 @@ static struct item_operations direntry_ops = {
.print_vi = direntry_print_vi
};
//////////////////////////////////////////////////////////////////////////////
// Error catching functions to catch errors caused by incorrect item types.
//
/* Error catching functions to catch errors caused by incorrect item types. */
static int errcatch_bytes_number(struct item_head *ih, int block_size)
{
reiserfs_warning(NULL, "green-16001",
......@@ -685,8 +680,12 @@ static int errcatch_create_vi(struct virtual_node *vn,
{
reiserfs_warning(NULL, "green-16006",
"Invalid item type observed, run fsck ASAP");
return 0; // We might return -1 here as well, but it won't help as create_virtual_node() from where
// this operation is called from is of return type void.
/*
* We might return -1 here as well, but it won't help as
* create_virtual_node() from where this operation is called
* from is of return type void.
*/
return 0;
}
static int errcatch_check_left(struct virtual_item *vi, int free,
......@@ -739,9 +738,6 @@ static struct item_operations errcatch_ops = {
errcatch_print_vi
};
//////////////////////////////////////////////////////////////////////////////
//
//
#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
#error Item types must use disk-format assigned values.
#endif
......
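The #if/#error check above is there because the on-disk item type values are used directly as indices into a table of these item_operations structures, so callers dispatch through the type rather than switching on it. A stripped-down userspace model of that mechanism (the real reiserfs table has many more methods plus the error-catching entries; names and bodies here are simplified):

#include <stdio.h>

enum { TYPE_STAT_DATA = 0, TYPE_INDIRECT = 1, TYPE_DIRECT = 2, TYPE_DIRENTRY = 3 };

struct item_head { int type; int len; };

struct item_operations {
	int (*bytes_number)(struct item_head *ih, int block_size);
};

static int sd_bytes(struct item_head *ih, int bs)       { (void)ih; (void)bs; return 0; }
static int indirect_bytes(struct item_head *ih, int bs) { return ih->len / 4 * bs; }
static int direct_bytes(struct item_head *ih, int bs)   { (void)bs; return ih->len; }
static int direntry_bytes(struct item_head *ih, int bs) { (void)ih; (void)bs; return -1; }

static struct item_operations sd_ops       = { sd_bytes };
static struct item_operations indirect_ops = { indirect_bytes };
static struct item_operations direct_ops   = { direct_bytes };
static struct item_operations direntry_ops = { direntry_bytes };

/* index == item type, which is why the types must keep their disk values */
static struct item_operations *item_ops[] = {
	&sd_ops, &indirect_ops, &direct_ops, &direntry_ops,
};

int main(void)
{
	struct item_head ih = { TYPE_DIRECT, 100 };

	printf("%d\n", item_ops[ih.type]->bytes_number(&ih, 4096));	/* prints 100 */
	return 0;
}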
......@@ -7,7 +7,7 @@
#include <linux/time.h>
#include "reiserfs.h"
// find where objectid map starts
/* find where objectid map starts */
#define objectid_map(s,rs) (old_format_only (s) ? \
(__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
(__le32 *)((rs) + 1))
......@@ -20,7 +20,7 @@ static void check_objectid_map(struct super_block *s, __le32 * map)
reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
(long unsigned int)le32_to_cpu(map[0]));
// FIXME: add something else here
/* FIXME: add something else here */
}
#else
......@@ -29,19 +29,21 @@ static void check_objectid_map(struct super_block *s, __le32 * map)
}
#endif
/* When we allocate objectids we allocate the first unused objectid.
Each sequence of objectids in use (the odd sequences) is followed
by a sequence of objectids not in use (the even sequences). We
only need to record the last objectid in each of these sequences
(both the odd and even sequences) in order to fully define the
boundaries of the sequences. A consequence of allocating the first
objectid not in use is that under most conditions this scheme is
extremely compact. The exception is immediately after a sequence
of operations which deletes a large number of objects of
non-sequential objectids, and even then it will become compact
again as soon as more objects are created. Note that many
interesting optimizations of layout could result from complicating
objectid assignment, but we have deferred making them for now. */
/*
* When we allocate objectids we allocate the first unused objectid.
* Each sequence of objectids in use (the odd sequences) is followed
* by a sequence of objectids not in use (the even sequences). We
* only need to record the last objectid in each of these sequences
* (both the odd and even sequences) in order to fully define the
* boundaries of the sequences. A consequence of allocating the first
* objectid not in use is that under most conditions this scheme is
* extremely compact. The exception is immediately after a sequence
* of operations which deletes a large number of objects of
* non-sequential objectids, and even then it will become compact
* again as soon as more objects are created. Note that many
* interesting optimizations of layout could result from complicating
* objectid assignment, but we have deferred making them for now.
*/
/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
......@@ -64,26 +66,30 @@ __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
return 0;
}
/* This incrementation allocates the first unused objectid. That
is to say, the first entry on the objectid map is the first
unused objectid, and by incrementing it we use it. See below
where we check to see if we eliminated a sequence of unused
objectids.... */
/*
* This incrementation allocates the first unused objectid. That
* is to say, the first entry on the objectid map is the first
* unused objectid, and by incrementing it we use it. See below
* where we check to see if we eliminated a sequence of unused
* objectids....
*/
map[1] = cpu_to_le32(unused_objectid + 1);
/* Now we check to see if we eliminated the last remaining member of
the first even sequence (and can eliminate the sequence by
eliminating its last objectid from oids), and can collapse the
first two odd sequences into one sequence. If so, then the net
result is to eliminate a pair of objectids from oids. We do this
by shifting the entire map to the left. */
/*
* Now we check to see if we eliminated the last remaining member of
* the first even sequence (and can eliminate the sequence by
* eliminating its last objectid from oids), and can collapse the
* first two odd sequences into one sequence. If so, then the net
* result is to eliminate a pair of objectids from oids. We do this
* by shifting the entire map to the left.
*/
if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
memmove(map + 1, map + 3,
(sb_oid_cursize(rs) - 3) * sizeof(__u32));
set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
}
journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
return unused_objectid;
}
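The long comment above boils down to this: the map stores run boundaries, with map[1] always the first unused objectid; allocating takes map[1], bumps it, and collapses the map when the bump makes the first free run vanish (map[1] == map[2]). A toy userspace version of just that allocation step, with an invented starting map:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t map[16] = { 1, 5, 8, 12 };	/* used [1,5), free [5,8), used [8,12), free from 12 on */
static int cursize = 4;				/* number of boundaries in use */

static uint32_t get_unused_objectid(void)
{
	uint32_t unused = map[1];	/* first unused objectid */

	map[1] = unused + 1;		/* ...and now it is in use */

	/* first free run emptied: merge the first two used runs */
	if (cursize > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3, (cursize - 3) * sizeof(uint32_t));
		cursize -= 2;
	}
	return unused;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("allocated %u, map[1]=%u, cursize=%d\n",
		       get_unused_objectid(), map[1], cursize);
	return 0;
}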
......@@ -97,30 +103,33 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
int i = 0;
BUG_ON(!th->t_trans_id);
//return;
/*return; */
check_objectid_map(s, map);
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
/* start at the beginning of the objectid map (i = 0) and go to
the end of it (i = disk_sb->s_oid_cursize). Linear search is
what we use, though it is possible that binary search would be
more efficient after performing lots of deletions (which is
when oids is large.) We only check even i's. */
journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
/*
* start at the beginning of the objectid map (i = 0) and go to
* the end of it (i = disk_sb->s_oid_cursize). Linear search is
* what we use, though it is possible that binary search would be
* more efficient after performing lots of deletions (which is
* when oids is large.) We only check even i's.
*/
while (i < sb_oid_cursize(rs)) {
if (objectid_to_release == le32_to_cpu(map[i])) {
/* This incrementation unallocates the objectid. */
//map[i]++;
le32_add_cpu(&map[i], 1);
/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
/*
* Did we unallocate the last member of an
* odd sequence, and can shrink oids?
*/
if (map[i] == map[i + 1]) {
/* shrink objectid map */
memmove(map + i, map + i + 2,
(sb_oid_cursize(rs) - i -
2) * sizeof(__u32));
//disk_sb->s_oid_cursize -= 2;
set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
RFALSE(sb_oid_cursize(rs) < 2 ||
......@@ -135,14 +144,19 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
objectid_to_release < le32_to_cpu(map[i + 1])) {
/* size of objectid map is not changed */
if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
//objectid_map[i+1]--;
le32_add_cpu(&map[i + 1], -1);
return;
}
/* JDM comparing two little-endian values for equality -- safe */
/*
* JDM comparing two little-endian values for
* equality -- safe
*/
/*
* objectid map must be expanded, but
* there is no space
*/
if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
/* objectid map must be expanded, but there is no space */
PROC_INFO_INC(s, leaked_oid);
return;
}
......@@ -178,8 +192,9 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
new_objectid_map = (__le32 *) (disk_sb + 1);
if (cur_size > new_size) {
/* mark everyone used that was listed as free at the end of the objectid
** map
/*
* mark everyone used that was listed as free at
* the end of the objectid map
*/
objectid_map[new_size - 1] = objectid_map[cur_size - 1];
set_sb_oid_cursize(disk_sb, new_size);
......
......@@ -53,8 +53,10 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
}
bforget(bh);
/* old disk layout detection; those partitions can be mounted, but
* cannot be resized */
/*
* old disk layout detection; those partitions can be mounted, but
* cannot be resized
*/
if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
!= REISERFS_DISK_OFFSET_IN_BYTES) {
printk
......@@ -86,12 +88,14 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
return -ENOMEM;
}
/* the new journal bitmaps are zero filled, now we copy in the bitmap
** node pointers from the old journal bitmap structs, and then
** transfer the new data structures into the journal struct.
**
** using the copy_size var below allows this code to work for
** both shrinking and expanding the FS.
/*
* the new journal bitmaps are zero filled, now we copy in
* the bitmap node pointers from the old journal bitmap
* structs, and then transfer the new data structures
* into the journal struct.
*
* using the copy_size var below allows this code to work for
* both shrinking and expanding the FS.
*/
copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
copy_size =
......@@ -101,36 +105,45 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
jb = SB_JOURNAL(s)->j_list_bitmap + i;
memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
/* just in case vfree schedules on us, copy the new
** pointer into the journal struct before freeing the
** old one
/*
* just in case vfree schedules on us, copy the new
* pointer into the journal struct before freeing the
* old one
*/
node_tmp = jb->bitmaps;
jb->bitmaps = jbitmap[i].bitmaps;
vfree(node_tmp);
}
/* allocate additional bitmap blocks, reallocate array of bitmap
* block pointers */
/*
* allocate additional bitmap blocks, reallocate
* array of bitmap block pointers
*/
bitmap =
vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
if (!bitmap) {
/* Journal bitmaps are still supersized, but the memory isn't
* leaked, so I guess it's ok */
/*
* Journal bitmaps are still supersized, but the
* memory isn't leaked, so I guess it's ok
*/
printk("reiserfs_resize: unable to allocate memory.\n");
return -ENOMEM;
}
for (i = 0; i < bmap_nr; i++)
bitmap[i] = old_bitmap[i];
/* This doesn't go through the journal, but it doesn't have to.
* The changes are still atomic: We're synced up when the journal
* transaction begins, and the new bitmaps don't matter if the
* transaction fails. */
/*
* This doesn't go through the journal, but it doesn't have to.
* The changes are still atomic: We're synced up when the
* journal transaction begins, and the new bitmaps don't
* matter if the transaction fails.
*/
for (i = bmap_nr; i < bmap_nr_new; i++) {
int depth;
/* don't use read_bitmap_block since it will cache
* the uninitialized bitmap */
/*
* don't use read_bitmap_block since it will cache
* the uninitialized bitmap
*/
depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, i * s->s_blocksize * 8);
reiserfs_write_lock_nested(s, depth);
......@@ -147,7 +160,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(bh);
reiserfs_write_lock_nested(s, depth);
// update bitmap_info stuff
/* update bitmap_info stuff */
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
}
......@@ -156,9 +169,11 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
vfree(old_bitmap);
}
/* begin transaction, if there was an error, it's fine. Yes, we have
/*
* begin transaction, if there was an error, it's fine. Yes, we have
* incorrect bitmaps now, but none of it is ever going to touch the
* disk anyway. */
* disk anyway.
*/
err = journal_begin(&th, s, 10);
if (err)
return err;
......@@ -167,7 +182,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
info = SB_AP_BITMAP(s) + bmap_nr - 1;
bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
if (!bh) {
int jerr = journal_end(&th, s, 10);
int jerr = journal_end(&th);
if (jerr)
return jerr;
return -EIO;
......@@ -178,14 +193,14 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
reiserfs_clear_le_bit(i, bh->b_data);
info->free_count += s->s_blocksize * 8 - block_r;
journal_mark_dirty(&th, s, bh);
journal_mark_dirty(&th, bh);
brelse(bh);
/* Correct new last bitmap block - It may not be full */
info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
if (!bh) {
int jerr = journal_end(&th, s, 10);
int jerr = journal_end(&th);
if (jerr)
return jerr;
return -EIO;
......@@ -194,7 +209,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
reiserfs_prepare_for_journal(s, bh, 1);
for (i = block_r_new; i < s->s_blocksize * 8; i++)
reiserfs_set_le_bit(i, bh->b_data);
journal_mark_dirty(&th, s, bh);
journal_mark_dirty(&th, bh);
brelse(bh);
info->free_count -= s->s_blocksize * 8 - block_r_new;
......@@ -207,8 +222,8 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
PUT_SB_BLOCK_COUNT(s, block_count_new);
PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
SB_JOURNAL(s)->j_must_wait = 1;
return journal_end(&th, s, 10);
return journal_end(&th);
}
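Two numbers in the resize loop above are worth spelling out: each bitmap block covers s_blocksize * 8 filesystem blocks (one bit each), which is why new bitmap block i is read with sb_bread(s, i * s->s_blocksize * 8), and its free_count starts at s_blocksize * 8 - 1, presumably because one bit (the bitmap block's own) is already taken. A quick check of those numbers for a 4 KiB block size:

#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096;			/* bytes per block */
	unsigned long bits_per_bitmap = blocksize * 8;	/* 32768 blocks covered */

	for (unsigned long i = 1; i <= 3; i++)
		printf("bitmap block %lu lives at block %lu and starts with %lu free bits\n",
		       i, i * bits_per_bitmap, bits_per_bitmap - 1);
	return 0;
}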
......@@ -61,7 +61,8 @@ static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
return ret;
}
/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
/*
* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
* Let's try to be smart about it.
* xattr root: We cache it. If it's not cached, we may need to create it.
* xattr dir: If anything has been loaded for this inode, we can set a flag
......
......@@ -25,8 +25,10 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int size = acl ? posix_acl_xattr_size(acl->a_count) : 0;
/* Pessimism: We can't assume that anything from the xattr root up
* has been created. */
/*
* Pessimism: We can't assume that anything from the xattr root up
* has been created.
*/
jcreate_blocks = reiserfs_xattr_jcreate_nblocks(inode) +
reiserfs_xattr_nblocks(inode, size) * 2;
......@@ -37,7 +39,7 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (error == 0) {
error = __reiserfs_set_acl(&th, inode, type, acl);
reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
error2 = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
if (error2)
error = error2;
......@@ -111,7 +113,7 @@ static struct posix_acl *reiserfs_posix_acl_from_disk(const void *value, size_t
goto fail;
return acl;
fail:
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
......@@ -164,7 +166,7 @@ static void *reiserfs_posix_acl_to_disk(const struct posix_acl *acl, size_t * si
}
return (char *)ext_acl;
fail:
fail:
kfree(ext_acl);
return ERR_PTR(-EINVAL);
}
......@@ -208,8 +210,10 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
retval = reiserfs_xattr_get(inode, name, value, size);
if (retval == -ENODATA || retval == -ENOSYS) {
/* This shouldn't actually happen as it should have
been caught above.. but just in case */
/*
* This shouldn't actually happen as it should have
* been caught above.. but just in case
*/
acl = NULL;
} else if (retval < 0) {
acl = ERR_PTR(retval);
......@@ -290,8 +294,10 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
return error;
}
/* dir->i_mutex: locked,
* inode is new and not released into the wild yet */
/*
* dir->i_mutex: locked,
* inode is new and not released into the wild yet
*/
int
reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
struct inode *dir, struct dentry *dentry,
......@@ -304,14 +310,18 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
if (S_ISLNK(inode->i_mode))
return 0;
/* ACLs can only be used on "new" objects, so if it's an old object
* there is nothing to inherit from */
/*
* ACLs can only be used on "new" objects, so if it's an old object
* there is nothing to inherit from
*/
if (get_inode_sd_version(dir) == STAT_DATA_V1)
goto apply_umask;
/* Don't apply ACLs to objects in the .reiserfs_priv tree.. This
/*
* Don't apply ACLs to objects in the .reiserfs_priv tree.. This
* would be useless since permissions are ignored, and a pain because
* it introduces locking cycles */
* it introduces locking cycles
*/
if (IS_PRIVATE(dir)) {
inode->i_flags |= S_PRIVATE;
goto apply_umask;
......@@ -335,7 +345,7 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
return err;
apply_umask:
apply_umask:
/* no ACL, apply umask */
inode->i_mode &= ~current_umask();
return err;
......