Commit 8f1d4983 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: compress: remove unneeded preallocation

Since we now reserve iblocks for the space saved by compression, there is no
need to preallocate blocks for a later write when overwriting a compressed
cluster.

In addition, add a bug_on to detect a wrong reserved iblock count in
__f2fs_cluster_blocks().

Bug fix in the original patch by Jaegeuk:
If the compressed blocks of a file have been released, which also sets the
immutable bit, we can see fewer compressed block addresses than the cluster
size. Let's fix the wrong BUG_ON accordingly.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 8939a848
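
As a concrete illustration of the invariant the new bug_on enforces, here is a
minimal userspace sketch (not kernel code). The block-address constants, the
4-slot cluster layout, and the plain boolean standing in for IS_IMMUTABLE()
are simplified assumptions for illustration; only the counting rule and the
assertion mirror the patch below.

/*
 * Userspace model of the check added to __f2fs_cluster_blocks():
 * for a compressed cluster whose inode is not immutable, every slot
 * must hold either a real block address or a reserved NEW_ADDR, so
 * the count of non-NULL_ADDR slots equals the cluster size.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum {
        NULL_ADDR     = 0,      /* slot has no block and no reservation */
        NEW_ADDR      = 1,      /* slot reserved but not yet written back */
        COMPRESS_ADDR = 2,      /* slot 0 marker of a compressed cluster */
        FIRST_BLKADDR = 3,      /* stand-in for a real block address */
};

#define CLUSTER_SIZE 4          /* 4-page cluster, a common f2fs setup */

/* Count the cluster slots that are not NULL_ADDR (the !compr case). */
static int cluster_blocks(const unsigned int blkaddr[CLUSTER_SIZE])
{
        int i, ret = 1;         /* slot 0 holds COMPRESS_ADDR */

        for (i = 1; i < CLUSTER_SIZE; i++)
                if (blkaddr[i] != NULL_ADDR)
                        ret++;
        return ret;
}

int main(void)
{
        /*
         * Overwrite case: compression saved two blocks, but their slots
         * stay reserved (NEW_ADDR), so the count equals the cluster size
         * and no preallocation is needed before rewriting the cluster.
         */
        unsigned int reserved[CLUSTER_SIZE] = {
                COMPRESS_ADDR, FIRST_BLKADDR, NEW_ADDR, NEW_ADDR,
        };
        /*
         * Released case: the compressed blocks were released (which also
         * sets the immutable bit), so NULL_ADDR holes are expected and
         * the assertion must not fire.
         */
        unsigned int released[CLUSTER_SIZE] = {
                COMPRESS_ADDR, FIRST_BLKADDR, NULL_ADDR, NULL_ADDR,
        };
        bool immutable;

        /* Mirrors: !compr && ret != cluster_size && !IS_IMMUTABLE(inode) */
        immutable = false;
        assert(cluster_blocks(reserved) == CLUSTER_SIZE || immutable);

        immutable = true;       /* e.g. after F2FS_IOC_RELEASE_COMPRESS_BLOCKS */
        assert(cluster_blocks(released) == CLUSTER_SIZE || immutable);

        printf("cluster-block invariant holds in both cases\n");
        return 0;
}

Compiling and running the sketch (for example, cc -o cluster_invariant
cluster_invariant.c && ./cluster_invariant) exercises both the reserved and
the released layouts without tripping the assertion.
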
@@ -926,6 +926,9 @@ static int __f2fs_cluster_blocks(struct inode *inode,
                                         ret++;
                         }
                 }
+
+                f2fs_bug_on(F2FS_I_SB(inode),
+                        !compr && ret != cluster_size && !IS_IMMUTABLE(inode));
         }
 fail:
         f2fs_put_dnode(&dn);
@@ -984,21 +987,16 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
         struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
         struct address_space *mapping = cc->inode->i_mapping;
         struct page *page;
-        struct dnode_of_data dn;
         sector_t last_block_in_bio;
         unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
         pgoff_t start_idx = start_idx_of_cluster(cc);
         int i, ret;
-        bool prealloc;
 
 retry:
         ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
         if (ret <= 0)
                 return ret;
 
-        /* compressed case */
-        prealloc = (ret < cc->cluster_size);
-
         ret = f2fs_init_compress_ctx(cc);
         if (ret)
                 return ret;
@@ -1056,25 +1054,6 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
                 }
         }
 
-        if (prealloc) {
-                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
-
-                set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
-
-                for (i = cc->cluster_size - 1; i > 0; i--) {
-                        ret = f2fs_get_block(&dn, start_idx + i);
-                        if (ret) {
-                                i = cc->cluster_size;
-                                break;
-                        }
-
-                        if (dn.data_blkaddr != NEW_ADDR)
-                                break;
-                }
-
-                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
-        }
-
         if (likely(!ret)) {
                 *fsdata = cc->rpages;
                 *pagep = cc->rpages[offset_in_cluster(cc, index)];
......
@@ -85,10 +85,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
                         err = ret;
                         goto err;
                 } else if (ret) {
-                        if (ret < F2FS_I(inode)->i_cluster_size) {
-                                err = -EAGAIN;
-                                goto err;
-                        }
                         need_alloc = false;
                 }
         }
......
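
For the last hunk, the behavioral change is that a writable mmap fault over a
compressed cluster no longer bails out with -EAGAIN when fewer than
cluster_size blocks are mapped; any compressed cluster now skips block
allocation, because its saved blocks stay reserved. The userspace sketch
below models only that decision, with f2fs_is_compressed_cluster() reduced to
a plain integer argument (an assumption for illustration, not the real call).

#include <stdbool.h>
#include <stdio.h>

#define CLUSTER_SIZE 4

/*
 * ret models the value f2fs_is_compressed_cluster() reports for the
 * faulting page's cluster: 0 for a normal cluster, > 0 for a compressed
 * one.  Before this patch, 0 < ret < CLUSTER_SIZE made the fault bail
 * out with -EAGAIN; now any compressed cluster simply skips allocation.
 */
static bool need_block_allocation(int ret)
{
        return ret == 0;
}

int main(void)
{
        printf("normal cluster:          %s\n",
               need_block_allocation(0) ? "allocate" : "skip");
        printf("partially saved cluster: %s\n",
               need_block_allocation(2) ? "allocate" : "skip");
        printf("fully mapped cluster:    %s\n",
               need_block_allocation(CLUSTER_SIZE) ? "allocate" : "skip");
        return 0;
}
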