Commit 8e099d1e authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Bug fixes and clean ups for the 3.17 merge window"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix ext4_discard_allocated_blocks() if we can't allocate the pa struct
  ext4: fix COLLAPSE RANGE test for bigalloc file systems
  ext4: check inline directory before converting
  ext4: fix incorrect locking in move_extent_per_page
  ext4: use correct depth value
  ext4: add i_data_sem sanity check
  ext4: fix wrong size computation in ext4_mb_normalize_request()
  ext4: make ext4_has_inline_data() as a inline function
  ext4: remove readpage() check in ext4_mmap_file()
  ext4: fix punch hole on files with indirect mapping
  ext4: remove metadata reservation checks
  ext4: rearrange initialization to fix EXT4FS_DEBUG
parents b54ecfb7 86f0afd4
@@ -639,7 +639,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	if (!(*errp) &&
 	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
 				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
...
@@ -571,6 +571,31 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
+		      int buf_size)
+{
+	struct ext4_dir_entry_2 *de;
+	int nlen, rlen;
+	unsigned int offset = 0;
+	char *top;
+
+	de = (struct ext4_dir_entry_2 *)buf;
+	top = buf + buf_size;
+	while ((char *) de < top) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+					 buf, buf_size, offset))
+			return -EIO;
+		nlen = EXT4_DIR_REC_LEN(de->name_len);
+		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+		offset += rlen;
+	}
+	if ((char *) de > top)
+		return -EIO;
+
+	return 0;
+}
+
 const struct file_operations ext4_dir_operations = {
 	.llseek		= ext4_dir_llseek,
 	.read		= generic_read_dir,
...
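The new ext4_check_all_de() is a bounds-checked walk over variable-length directory records. Below is a user-space model of the same loop, illustrative only: a bare 16-bit rec_len stands in for struct ext4_dir_entry_2, 12 bytes approximates the smallest legal entry, and the real ext4_check_dir_entry() validates considerably more (alignment, name_len versus rec_len, block boundaries).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* read the 16-bit rec_len field at the start of a record */
static uint16_t rec_len(const char *rec)
{
	uint16_t v;
	memcpy(&v, rec, sizeof(v));
	return v;
}

static int check_all_de(const char *buf, int buf_size)
{
	const char *top = buf + buf_size;
	const char *de = buf;

	while (de < top) {
		uint16_t rlen = rec_len(de);

		/* reject records too small to be real or that run past
		 * the buffer; the kernel's per-entry check does this
		 * and more, then the walk cannot loop forever */
		if (rlen < 12 || de + rlen > top)
			return -5;	/* mirrors the kernel's -EIO */
		de += rlen;
	}
	return 0;
}

int main(void)
{
	char blk[64];
	uint16_t rlen;

	memset(blk, 0, sizeof(blk));
	rlen = 64;			/* one record covering the block */
	memcpy(blk, &rlen, sizeof(rlen));
	printf("valid:   %d\n", check_all_de(blk, sizeof(blk)));

	rlen = 80;			/* rec_len overflows the buffer */
	memcpy(blk, &rlen, sizeof(rlen));
	printf("corrupt: %d\n", check_all_de(blk, sizeof(blk)));
	return 0;
}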
@@ -591,7 +591,6 @@ enum {
 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE	0x0008
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER	0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER	0x0020
-#define EXT4_FREE_BLOCKS_RESERVE		0x0040
 
 /*
  * ioctl commands
@@ -2029,6 +2028,8 @@ static inline unsigned char get_dtype(struct super_block *sb, int filetype)
 	return ext4_filetype_table[filetype];
 }
+extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
+			     void *buf, int buf_size);
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
@@ -2144,8 +2145,8 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
-extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-				 ext4_lblk_t first, ext4_lblk_t stop);
+extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+				 ext4_lblk_t start, ext4_lblk_t end);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -2560,7 +2561,6 @@ extern const struct file_operations ext4_file_operations;
 extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
 
 /* inline.c */
-extern int ext4_has_inline_data(struct inode *inode);
 extern int ext4_get_max_inline_size(struct inode *inode);
 extern int ext4_find_inline_data_nolock(struct inode *inode);
 extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
@@ -2626,6 +2626,12 @@ extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 
 extern int ext4_convert_inline_data(struct inode *inode);
 
+static inline int ext4_has_inline_data(struct inode *inode)
+{
+	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+	       EXT4_I(inode)->i_inline_off;
+}
+
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
 extern const struct inode_operations ext4_special_inode_operations;
...
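Note how the last two header hunks pair with the inline.c hunk further down: the extern declaration of ext4_has_inline_data() disappears and the predicate returns as a static inline near the bottom of ext4.h, presumably placed there so that ext4_test_inode_flag() and EXT4_I() are already in scope. The pattern in miniature, with hypothetical names (illustrative only; compiles standalone):

/* before: the header declared it, a .c file defined and exported it */
/* extern int foo_has_inline_data(struct foo *f); */

/* after: the one-line predicate lives in the header and every
 * caller gets it inlined instead of paying for a function call */
struct foo {
	unsigned int	flags;
	int		inline_off;
};
#define FOO_INLINE_DATA	0x10000000

static inline int foo_has_inline_data(const struct foo *f)
{
	/* both conditions must hold, exactly as in the ext4 version */
	return (f->flags & FOO_INLINE_DATA) && f->inline_off;
}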
@@ -161,6 +161,8 @@ int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
 		     struct inode *inode, struct ext4_ext_path *path)
 {
 	int err;
+
+	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
 	if (path->p_bh) {
 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
 		/* path points to block */
@@ -1808,8 +1810,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 	brelse(path[1].p_bh);
 	ext4_free_blocks(handle, inode, NULL, blk, 1,
-			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
-			 EXT4_FREE_BLOCKS_RESERVE);
+			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 }
 
 /*
@@ -3253,7 +3254,7 @@ static int ext4_split_extent_at(handle_t *handle,
 fix_extent_len:
 	ex->ee_len = orig_ex.ee_len;
-	ext4_ext_dirty(handle, inode, path + depth);
+	ext4_ext_dirty(handle, inode, path + path->p_depth);
 	return err;
 }
 
@@ -5403,16 +5404,13 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	int ret;
 
 	/* Collapse range works only on fs block size aligned offsets. */
-	if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
-	    len & (EXT4_BLOCK_SIZE(sb) - 1))
+	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
+	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
 		return -EINVAL;
 
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
-	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1)
-		return -EOPNOTSUPP;
-
 	trace_ext4_collapse_range(inode, offset, len);
 
 	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
...
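The collapse-range hunk replaces the block-size alignment mask with the cluster-size mask and drops the blanket -EOPNOTSUPP for bigalloc filesystems. A quick user-space check of why the old mask was too permissive, assuming 4 KiB blocks and a cluster ratio of 16 (numbers are illustrative, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long block_size   = 4096;		/* 4 KiB blocks */
	unsigned long cluster_size = 4096 * 16;		/* ratio 16 => 64 KiB */
	unsigned long long offset  = 32768;		/* 32 KiB */

	printf("block aligned:   %s\n",
	       (offset & (block_size - 1))   ? "no" : "yes");
	printf("cluster aligned: %s\n",
	       (offset & (cluster_size - 1)) ? "no" : "yes");
	/* Before the fix, this offset passed the check (block aligned)
	 * even though bigalloc can only collapse whole clusters. */
	return 0;
}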
@@ -200,10 +200,6 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct address_space *mapping = file->f_mapping;
-
-	if (!mapping->a_ops->readpage)
-		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &ext4_file_vm_ops;
 	return 0;
...
@@ -1295,97 +1295,220 @@ void ext4_ind_truncate(handle_t *handle, struct inode *inode)
 	}
 }
 
-static int free_hole_blocks(handle_t *handle, struct inode *inode,
-			    struct buffer_head *parent_bh, __le32 *i_data,
-			    int level, ext4_lblk_t first,
-			    ext4_lblk_t count, int max)
-{
-	struct buffer_head *bh = NULL;
-	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int ret = 0;
-	int i, inc;
-	ext4_lblk_t offset;
-	__le32 blk;
-
-	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
-	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
-		if (offset >= count + first)
-			break;
-		if (*i_data == 0 || (offset + inc) <= first)
-			continue;
-		blk = *i_data;
-		if (level > 0) {
-			ext4_lblk_t first2;
-			ext4_lblk_t count2;
-
-			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
-			if (!bh) {
-				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
-						       "Read failure");
-				return -EIO;
-			}
-			if (first > offset) {
-				first2 = first - offset;
-				count2 = count;
-			} else {
-				first2 = 0;
-				count2 = count - (offset - first);
-			}
-			ret = free_hole_blocks(handle, inode, bh,
-					       (__le32 *)bh->b_data, level - 1,
-					       first2, count2,
-					       inode->i_sb->s_blocksize >> 2);
-			if (ret) {
-				brelse(bh);
-				goto err;
-			}
-		}
-		if (level == 0 ||
-		    (bh && all_zeroes((__le32 *)bh->b_data,
-				      (__le32 *)bh->b_data + addr_per_block))) {
-			ext4_free_data(handle, inode, parent_bh,
-				       i_data, i_data + 1);
-		}
-		brelse(bh);
-		bh = NULL;
-	}
-
-err:
-	return ret;
-}
-
-int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-			  ext4_lblk_t first, ext4_lblk_t stop)
-{
-	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int level, ret = 0;
-	int num = EXT4_NDIR_BLOCKS;
-	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
-	__le32 *i_data = EXT4_I(inode)->i_data;
-
-	count = stop - first;
-	for (level = 0; level < 4; level++, max *= addr_per_block) {
-		if (first < max) {
-			ret = free_hole_blocks(handle, inode, NULL, i_data,
-					       level, first, count, num);
-			if (ret)
-				goto err;
-			if (count > max - first)
-				count -= max - first;
-			else
-				break;
-			first = 0;
-		} else {
-			first -= max;
-		}
-		i_data += num;
-		if (level == 0) {
-			num = 1;
-			max = 1;
-		}
-	}
-
-err:
-	return ret;
-}
+/**
+ * ext4_ind_remove_space - remove space from the range
+ * @handle: JBD handle for this transaction
+ * @inode:	inode we are dealing with
+ * @start:	First block to remove
+ * @end:	One block after the last block to remove (exclusive)
+ *
+ * Free the blocks in the defined range (end is exclusive endpoint of
+ * range). This is used by ext4_punch_hole().
+ */
+int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+			  ext4_lblk_t start, ext4_lblk_t end)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__le32 *i_data = ei->i_data;
+	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+	ext4_lblk_t offsets[4], offsets2[4];
+	Indirect chain[4], chain2[4];
+	Indirect *partial, *partial2;
+	ext4_lblk_t max_block;
+	__le32 nr = 0, nr2 = 0;
+	int n = 0, n2 = 0;
+	unsigned blocksize = inode->i_sb->s_blocksize;
+
+	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+	if (end >= max_block)
+		end = max_block;
+	if ((start >= end) || (start > max_block))
+		return 0;
+
+	n = ext4_block_to_path(inode, start, offsets, NULL);
+	n2 = ext4_block_to_path(inode, end, offsets2, NULL);
+
+	BUG_ON(n > n2);
+
+	if ((n == 1) && (n == n2)) {
+		/* We're punching only within direct block range */
+		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+			       i_data + offsets2[0]);
+		return 0;
+	} else if (n2 > n) {
+		/*
+		 * Start and end are on a different levels so we're going to
+		 * free partial block at start, and partial block at end of
+		 * the range. If there are some levels in between then
+		 * do_indirects label will take care of that.
+		 */
+
+		if (n == 1) {
+			/*
+			 * Start is at the direct block level, free
+			 * everything to the end of the level.
+			 */
+			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+				       i_data + EXT4_NDIR_BLOCKS);
+			goto end_range;
+		}
+
+		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+		if (nr) {
+			if (partial == chain) {
+				/* Shared branch grows from the inode */
+				ext4_free_branches(handle, inode, NULL,
+					   &nr, &nr+1, (chain+n-1) - partial);
+				*partial->p = 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				BUFFER_TRACE(partial->bh, "get_write_access");
+				ext4_free_branches(handle, inode, partial->bh,
+					partial->p,
+					partial->p+1, (chain+n-1) - partial);
+			}
+		}
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		while (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				partial->p + 1,
+				(__le32 *)partial->bh->b_data+addr_per_block,
+				(chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+
+end_range:
+		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+		if (nr2) {
+			if (partial2 == chain2) {
+				/*
+				 * Remember, end is exclusive so here we're at
+				 * the start of the next level we're not going
+				 * to free. Everything was covered by the start
+				 * of the range.
+				 */
+				return 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				partial2--;
+			}
+		} else {
+			/*
+			 * ext4_find_shared returns Indirect structure which
+			 * points to the last element which should not be
+			 * removed by truncate. But this is end of the range
+			 * in punch_hole so we need to point to the next element
+			 */
+			partial2->p++;
+		}
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		while (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n2-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
+		}
+		goto do_indirects;
+	}
+
+	/* Punch happened within the same level (n == n2) */
+	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+	/*
+	 * ext4_find_shared returns Indirect structure which
+	 * points to the last element which should not be
+	 * removed by truncate. But this is end of the range
+	 * in punch_hole so we need to point to the next element
+	 */
+	partial2->p++;
+	while ((partial > chain) || (partial2 > chain2)) {
+		/* We're at the same block, so we're almost finished */
+		if ((partial->bh && partial2->bh) &&
+		    (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
+			if ((partial > chain) && (partial2 > chain2)) {
+				ext4_free_branches(handle, inode, partial->bh,
+						   partial->p + 1,
+						   partial2->p,
+						   (chain+n-1) - partial);
+				BUFFER_TRACE(partial->bh, "call brelse");
+				brelse(partial->bh);
+				BUFFER_TRACE(partial2->bh, "call brelse");
+				brelse(partial2->bh);
+			}
+			return 0;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		if (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				   partial->p + 1,
+				   (__le32 *)partial->bh->b_data+addr_per_block,
+				   (chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		if (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
+		}
+	}
+
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	switch (offsets[0]) {
+	default:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_IND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+			i_data[EXT4_IND_BLOCK] = 0;
+		}
+	case EXT4_IND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_DIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+			i_data[EXT4_DIND_BLOCK] = 0;
+		}
+	case EXT4_DIND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_TIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+			i_data[EXT4_TIND_BLOCK] = 0;
+		}
+	case EXT4_TIND_BLOCK:
+		;
+	}
+	return 0;
 }
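The do_indirects switch at the end of ext4_ind_remove_space() deliberately relies on case fall-through: starting from the tree level that held the start of the punch, it frees every remaining whole subtree until it reaches the level holding the end. A compressed user-space model of that control flow, with hypothetical level names standing in for EXT4_IND/DIND/TIND_BLOCK (illustrative only, not the kernel's types):

#include <stdio.h>

enum { DIRECT, IND, DIND, TIND };

static void free_whole_subtrees(int start_level, int end_level)
{
	int n = start_level;

	switch (start_level) {
	default:			/* punch began in the direct blocks */
		if (++n >= end_level)
			return;
		puts("free single-indirect subtree");
		/* fall through */
	case IND:
		if (++n >= end_level)
			return;
		puts("free double-indirect subtree");
		/* fall through */
	case DIND:
		if (++n >= end_level)
			return;
		puts("free triple-indirect subtree");
		/* fall through */
	case TIND:
		;			/* nothing above the triple-indirect tree */
	}
}

int main(void)
{
	/* punch began in the direct blocks and extended past TIND */
	free_whole_subtrees(DIRECT, 4);
	return 0;
}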
@@ -120,12 +120,6 @@ int ext4_get_max_inline_size(struct inode *inode)
 	return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
 }
 
-int ext4_has_inline_data(struct inode *inode)
-{
-	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
-	       EXT4_I(inode)->i_inline_off;
-}
-
 /*
  * this function does not take xattr_sem, which is OK because it is
  * currently only used in a code path coming form ext4_iget, before
@@ -1178,6 +1172,18 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
 	if (error < 0)
 		goto out;
 
+	/*
+	 * Make sure the inline directory entries pass checks before we try to
+	 * convert them, so that we avoid touching stuff that needs fsck.
+	 */
+	if (S_ISDIR(inode->i_mode)) {
+		error = ext4_check_all_de(inode, iloc->bh,
+					  buf + EXT4_INLINE_DOTDOT_SIZE,
+					  inline_size - EXT4_INLINE_DOTDOT_SIZE);
+		if (error)
+			goto out;
+	}
+
 	error = ext4_destroy_inline_data_nolock(handle, inode);
 	if (error)
 		goto out;
...
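The inline.c change is a validate-before-mutate fix: entries are checked while the inline copy is still intact, so a corrupted inline directory now fails the conversion with -EIO instead of being copied into a real block. The check starts past EXT4_INLINE_DOTDOT_SIZE because the inline area stores the '..' entry in a compressed form rather than as a regular record. The shape of the fix as a user-space sketch, with hypothetical helpers standing in for the kernel functions:

#include <errno.h>
#include <stdio.h>

static int check_entries(const char *buf, int len)
{
	/* stands in for ext4_check_all_de(); a real check walks records */
	return len >= 0 ? 0 : -EIO;
}

static int convert_inline_dir(char *buf, int len)
{
	int dotdot = 4;			/* models EXT4_INLINE_DOTDOT_SIZE */
	int error;

	/* validate first: nothing has been destroyed yet, so a corrupt
	 * directory aborts here with the original data untouched */
	error = check_entries(buf + dotdot, len - dotdot);
	if (error)
		return error;

	/* only now destroy the inline copy and build real blocks */
	return 0;
}

int main(void)
{
	char area[60] = { 0 };

	printf("convert: %d\n", convert_inline_dir(area, sizeof(area)));
	return 0;
}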
@@ -324,18 +324,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 }
 #endif
 
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			"with only %d reserved metadata blocks "
-			"(releasing %d blocks with reserved %d data blocks)",
-			inode->i_ino, ei->i_allocated_meta_blocks,
-			ei->i_reserved_meta_blocks, used,
-			ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */
@@ -1221,49 +1184,6 @@ static int ext4_journalled_write_end(struct file *file,
 	return ret ? ret : copied;
 }
 
-/*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;	/* success */
-}
-
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;	/* success */
@@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
@@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-		 ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-		 ei->i_allocated_meta_blocks);
 	return;
 }
@@ -1620,13 +1510,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 			retval = ret;
 			goto out_unlock;
 		}
-	} else {
-		ret = ext4_da_reserve_metadata(inode, iblock);
-		if (ret) {
-			/* not enough space to reserve */
-			retval = ret;
-			goto out_unlock;
-		}
 	}
 
 	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
 	trace_ext4_alloc_da_blocks(inode);
 
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*
@@ -3624,7 +3506,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 		ret = ext4_ext_remove_space(inode, first_block,
 					    stop_block - 1);
 	else
-		ret = ext4_free_hole_blocks(handle, inode, first_block,
+		ret = ext4_ind_remove_space(handle, inode, first_block,
 					    stop_block);
 
 	up_write(&EXT4_I(inode)->i_data_sem);
...
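Taken together, the inode.c hunks delete the worst-case metadata estimation (ext4_calc_metadata_amount(), ext4_da_reserve_metadata(), i_reserved_meta_blocks and friends): delayed allocation now reserves exactly one cluster per dirtied cluster, and metadata quota is charged at writeout. A toy model of the simplified bookkeeping; plain globals stand in for the per-inode fields and percpu counters, so this is a sketch of the idea, not the kernel's code:

#include <stdio.h>

static long free_clusters = 1000;	/* models s_freeclusters_counter */
static long dirty_clusters;		/* models s_dirtyclusters_counter */
static long reserved_data;		/* models i_reserved_data_blocks */

static int da_reserve_cluster(void)
{
	/* one cluster per dirtied cluster; no metadata estimate */
	if (free_clusters - dirty_clusters < 1)
		return -1;		/* kernel returns -ENOSPC */
	dirty_clusters++;
	reserved_data++;
	return 0;
}

static void da_update_reserve_space(long used)
{
	/* blocks were really allocated: just drop the reservation */
	reserved_data -= used;
	dirty_clusters -= used;
}

int main(void)
{
	da_reserve_cluster();
	da_reserve_cluster();
	da_update_reserve_space(2);
	printf("reserved=%ld dirty=%ld\n", reserved_data, dirty_clusters);
	return 0;
}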
@@ -3075,8 +3075,9 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 					      (23 - bsbits)) << 23;
 		size = 8 * 1024 * 1024;
 	} else {
-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-		size	  = ac->ac_o_ex.fe_len << bsbits;
+		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+					      ac->ac_o_ex.fe_len) << bsbits;
 	}
 	size = size >> bsbits;
 	start = start_off >> bsbits;
@@ -3216,8 +3217,27 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
 	struct ext4_prealloc_space *pa = ac->ac_pa;
+	struct ext4_buddy e4b;
+	int err;
 
-	if (pa && pa->pa_type == MB_INODE_PA)
+	if (pa == NULL) {
+		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+		if (err) {
+			/*
+			 * This should never happen since we pin the
+			 * pages in the ext4_allocation_context so
+			 * ext4_mb_load_buddy() should never fail.
+			 */
+			WARN(1, "mb_load_buddy failed (%d)", err);
+			return;
+		}
+		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+			       ac->ac_f_ex.fe_len);
+		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+		return;
+	}
+
+	if (pa->pa_type == MB_INODE_PA)
 		pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
@@ -4627,7 +4647,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	struct buffer_head *gd_bh;
 	ext4_group_t block_group;
 	struct ext4_sb_info *sbi;
-	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_buddy e4b;
 	unsigned int count_clusters;
 	int err = 0;
@@ -4838,19 +4857,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 			     &sbi->s_flex_groups[flex_group].free_clusters);
 	}
 
-	if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
-		percpu_counter_add(&sbi->s_dirtyclusters_counter,
-				   count_clusters);
-		spin_lock(&ei->i_block_reservation_lock);
-		if (flags & EXT4_FREE_BLOCKS_METADATA)
-			ei->i_reserved_meta_blocks += count_clusters;
-		else
-			ei->i_reserved_data_blocks += count_clusters;
-		spin_unlock(&ei->i_block_reservation_lock);
-		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
-			dquot_reclaim_block(inode,
-					EXT4_C2B(sbi, count_clusters));
-	} else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
 
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
...
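The ext4_mb_normalize_request() fix is a unit conversion: ac_o_ex.fe_len counts clusters, so on bigalloc it must go through EXT4_C2B() before the shift by block-size bits. Worked through with illustrative numbers (4 KiB blocks, cluster ratio 16; not values from the patch itself):

#include <stdio.h>

int main(void)
{
	int bsbits = 12;			/* 4 KiB blocks */
	int cluster_ratio = 16;			/* bigalloc: blocks per cluster */
	long long fe_len = 4;			/* request length, in clusters */

	long long wrong = fe_len << bsbits;			/* 16 KiB  */
	long long right = (fe_len * cluster_ratio) << bsbits;	/* 256 KiB */

	printf("old: %lld bytes, new: %lld bytes\n", wrong, right);
	return 0;
}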
@@ -39,6 +39,8 @@ static int finish_range(handle_t *handle, struct inode *inode,
 	newext.ee_block = cpu_to_le32(lb->first_block);
 	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
 	ext4_ext_store_pblock(&newext, lb->first_pblock);
+	/* Locking only for convinience since we are operating on temp inode */
+	down_write(&EXT4_I(inode)->i_data_sem);
 	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);
 
 	if (IS_ERR(path)) {
@@ -61,7 +63,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
 	 */
 	if (needed && ext4_handle_has_enough_credits(handle,
 						EXT4_RESERVE_TRANS_BLOCKS)) {
+		up_write((&EXT4_I(inode)->i_data_sem));
 		retval = ext4_journal_restart(handle, needed);
+		down_write((&EXT4_I(inode)->i_data_sem));
 		if (retval)
 			goto err_out;
 	} else if (needed) {
@@ -70,13 +74,16 @@ static int finish_range(handle_t *handle, struct inode *inode,
 			/*
 			 * IF not able to extend the journal restart the journal
 			 */
+			up_write((&EXT4_I(inode)->i_data_sem));
 			retval = ext4_journal_restart(handle, needed);
+			down_write((&EXT4_I(inode)->i_data_sem));
 			if (retval)
 				goto err_out;
 		}
 	}
 	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
 err_out:
+	up_write((&EXT4_I(inode)->i_data_sem));
 	if (path) {
 		ext4_ext_drop_refs(path);
 		kfree(path);
...
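The migrate.c hunks take i_data_sem for the temporary inode but drop it across ext4_journal_restart(), which can block waiting for the running transaction to commit. The pattern in miniature; pthread locks stand in for the kernel rw_semaphore and all names are hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_data_sem = PTHREAD_RWLOCK_INITIALIZER;

static int journal_restart(void)
{
	/* models ext4_journal_restart(): may block for a long time */
	return 0;
}

static int finish_range(void)
{
	int ret;

	pthread_rwlock_wrlock(&i_data_sem);
	/* ... extent lookup under the lock ... */

	pthread_rwlock_unlock(&i_data_sem);	/* drop before blocking */
	ret = journal_restart();		/* safe to sleep here */
	pthread_rwlock_wrlock(&i_data_sem);	/* retake before insert */

	/* ... insert extent under the lock ... */
	pthread_rwlock_unlock(&i_data_sem);
	return ret;
}

int main(void)
{
	printf("finish_range: %d\n", finish_range());
	return 0;
}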
@@ -1013,10 +1013,11 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 		*err = -EBUSY;
 		goto unlock_pages;
 	}
+	ext4_double_down_write_data_sem(orig_inode, donor_inode);
 	replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
 					       orig_blk_offset,
 					       block_len_in_page, err);
+	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	if (*err) {
 		if (replaced_count) {
 			block_len_in_page = replaced_count;
...
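ext4_double_down_write_data_sem() (defined elsewhere in move_extent.c) takes both inodes' i_data_sem in a fixed global order, so two racing moves on the same pair of inodes cannot deadlock against each other. A minimal model of that idiom using pthread mutexes ordered by address; the kernel's exact ordering rule may differ, and the struct is a stand-in:

#include <pthread.h>

struct inode_model {
	pthread_mutex_t i_data_sem;
};

static void double_down_write(struct inode_model *a, struct inode_model *b)
{
	if (a > b) {			/* fixed global order: lower first */
		struct inode_model *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(&a->i_data_sem);
	pthread_mutex_lock(&b->i_data_sem);
}

static void double_up_write(struct inode_model *a, struct inode_model *b)
{
	pthread_mutex_unlock(&a->i_data_sem);
	pthread_mutex_unlock(&b->i_data_sem);
}

int main(void)
{
	struct inode_model a = { PTHREAD_MUTEX_INITIALIZER };
	struct inode_model b = { PTHREAD_MUTEX_INITIALIZER };

	/* takes the same lock order as double_down_write(&b, &a) would */
	double_down_write(&a, &b);
	double_up_write(&a, &b);
	return 0;
}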
@@ -2142,10 +2142,6 @@ static int ext4_check_descriptors(struct super_block *sb,
 	}
 	if (NULL != first_not_zeroed)
 		*first_not_zeroed = grp;
-
-	ext4_free_blocks_count_set(sbi->s_es,
-				   EXT4_C2B(sbi, ext4_count_free_clusters(sb)));
-	sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
 	return 1;
 }
 
@@ -3883,13 +3879,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		goto failed_mount2;
 	}
-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
-		if (!ext4_fill_flex_info(sb)) {
-			ext4_msg(sb, KERN_ERR,
-				 "unable to initialize "
-				 "flex_bg meta info!");
-			goto failed_mount2;
-		}
 
 	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
@@ -3902,23 +3891,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	/* Register extent status tree shrinker */
 	ext4_es_register_shrinker(sbi);
 
-	err = percpu_counter_init(&sbi->s_freeclusters_counter,
-			ext4_count_free_clusters(sb));
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext4_count_free_inodes(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext4_count_dirs(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
-	}
-	if (err) {
+	if ((err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0)) != 0) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
 		goto failed_mount3;
 	}
@@ -4022,18 +3995,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
 
-	/*
-	 * The journal may have updated the bg summary counts, so we
-	 * need to update the global counters.
-	 */
-	percpu_counter_set(&sbi->s_freeclusters_counter,
-			   ext4_count_free_clusters(sb));
-	percpu_counter_set(&sbi->s_freeinodes_counter,
-			   ext4_count_free_inodes(sb));
-	percpu_counter_set(&sbi->s_dirs_counter,
-			   ext4_count_dirs(sb));
-	percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
-
 no_journal:
 	if (ext4_mballoc_ready) {
 		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
@@ -4141,6 +4102,33 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		goto failed_mount5;
 	}
 
+	block = ext4_count_free_clusters(sb);
+	ext4_free_blocks_count_set(sbi->s_es,
+				   EXT4_C2B(sbi, block));
+	err = percpu_counter_init(&sbi->s_freeclusters_counter, block);
+	if (!err) {
+		unsigned long freei = ext4_count_free_inodes(sb);
+		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei);
+	}
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+					  ext4_count_dirs(sb));
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		goto failed_mount6;
+	}
+
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
+		if (!ext4_fill_flex_info(sb)) {
+			ext4_msg(sb, KERN_ERR,
+				 "unable to initialize "
+				 "flex_bg meta info!");
+			goto failed_mount6;
+		}
+
 	err = ext4_register_li_request(sb, first_not_zeroed);
 	if (err)
 		goto failed_mount6;
@@ -4215,6 +4203,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
+	if (sbi->s_flex_groups)
+		ext4_kvfree(sbi->s_flex_groups);
+	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4233,12 +4227,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 failed_mount3:
 	ext4_es_unregister_shrinker(sbi);
 	del_timer_sync(&sbi->s_err_report);
-	if (sbi->s_flex_groups)
-		ext4_kvfree(sbi->s_flex_groups);
-	percpu_counter_destroy(&sbi->s_freeclusters_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
@@ -4556,11 +4544,13 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	else
 		es->s_kbytes_written =
 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-	ext4_free_blocks_count_set(es,
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+		ext4_free_blocks_count_set(es,
 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeclusters_counter)));
-	es->s_free_inodes_count =
-		cpu_to_le32(percpu_counter_sum_positive(
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+		es->s_free_inodes_count =
+			cpu_to_le32(percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeinodes_counter));
 	BUFFER_TRACE(sbh, "marking dirty");
 	ext4_superblock_csum_set(sb);
...
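The ext4_commit_super() guards exist because counter initialization now happens late in ext4_fill_super(), after journal recovery, while the superblock can be committed earlier on that same path. A toy illustration of reading a late-initialized counter safely; the struct is a stand-in, not the kernel's percpu_counter:

#include <stdio.h>

struct counter {
	int	initialized;
	long	value;
};

static struct counter free_clusters;	/* zeroed: not yet initialized */

static void commit_super(void)
{
	if (free_clusters.initialized)	/* models percpu_counter_initialized() */
		printf("free=%ld\n", free_clusters.value);
	else
		printf("counters not ready; keep on-disk value\n");
}

int main(void)
{
	commit_super();			/* during early mount / recovery */
	free_clusters.initialized = 1;	/* after the counters are set up */
	free_clusters.value = 1234;
	commit_super();
	return 0;
}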