Commit 7df3a431 authored by Fan Li, committed by Jaegeuk Kim

f2fs: optimize the flow of f2fs_map_blocks

check map->m_len right after it changes to avoid excess call
to update dnode_of_data.
Signed-off-by: Fan Li <fanofcode.li@samsung.com>
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 36b35a0d
...@@ -573,6 +573,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, ...@@ -573,6 +573,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int err = 0, ofs = 1; int err = 0, ofs = 1;
struct extent_info ei; struct extent_info ei;
bool allocated = false; bool allocated = false;
block_t blkaddr;
map->m_len = 0; map->m_len = 0;
map->m_flags = 0; map->m_flags = 0;
...@@ -636,6 +637,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, ...@@ -636,6 +637,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
pgofs++; pgofs++;
get_next: get_next:
if (map->m_len >= maxblocks)
goto sync_out;
if (dn.ofs_in_node >= end_offset) { if (dn.ofs_in_node >= end_offset) {
if (allocated) if (allocated)
sync_inode_page(&dn); sync_inode_page(&dn);
...@@ -653,44 +657,43 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, ...@@ -653,44 +657,43 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
} }
if (maxblocks > map->m_len) { blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) { if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) { if (create) {
if (unlikely(f2fs_cp_error(sbi))) { if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO; err = -EIO;
goto sync_out; goto sync_out;
}
err = __allocate_data_block(&dn);
if (err)
goto sync_out;
allocated = true;
map->m_flags |= F2FS_MAP_NEW;
blkaddr = dn.data_blkaddr;
} else {
/*
* we only merge preallocated unwritten blocks
* for fiemap.
*/
if (flag != F2FS_GET_BLOCK_FIEMAP ||
blkaddr != NEW_ADDR)
goto sync_out;
} }
err = __allocate_data_block(&dn);
if (err)
goto sync_out;
allocated = true;
map->m_flags |= F2FS_MAP_NEW;
blkaddr = dn.data_blkaddr;
} else {
/*
* we only merge preallocated unwritten blocks
* for fiemap.
*/
if (flag != F2FS_GET_BLOCK_FIEMAP ||
blkaddr != NEW_ADDR)
goto sync_out;
} }
}
/* Give more consecutive addresses for the readahead */ /* Give more consecutive addresses for the readahead */
if ((map->m_pblk != NEW_ADDR && if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) || blkaddr == (map->m_pblk + ofs)) ||
(map->m_pblk == NEW_ADDR && (map->m_pblk == NEW_ADDR &&
blkaddr == NEW_ADDR)) { blkaddr == NEW_ADDR)) {
ofs++; ofs++;
dn.ofs_in_node++; dn.ofs_in_node++;
pgofs++; pgofs++;
map->m_len++; map->m_len++;
goto get_next; goto get_next;
}
} }
sync_out: sync_out:
if (allocated) if (allocated)
sync_inode_page(&dn); sync_inode_page(&dn);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment