Commit 74de593a authored by Chao Yu, committed by Jaegeuk Kim

f2fs: read contiguous sit entry pages by merging for mount performance

Previously we read SIT entry pages one by one; that approach missed the chance
to read contiguous pages together. Now we read as many contiguous pages as
possible in one pass, for better mount performance.

change log:
 o merge judgements / use 'continue' or 'break' instead of 'goto', as Gu Zheng
   suggested.
 o add mark_page_accessed() before releasing the page, to delay VM reclaiming.
 o remove '*order' to simplify the function, as Jaegeuk Kim suggested.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
[Jaegeuk Kim: fix a bug in the block address calculation]
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
parent d4d288bc
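The batching idea described above can be illustrated with a minimal userspace sketch (this is not the kernel code): walk SIT block indexes, map each one to its current on-disk address, keep extending the read-ahead window only while those addresses stay physically contiguous, and issue one merged read per run. Everything in the sketch is invented for illustration; sit_addr(), issue_read() and ra_sit_blocks(), the sample address table, and the sit_blk_cnt/nrpages values stand in for current_sit_addr(), the meta-page read submission and ra_sit_pages() in the actual patch below.

#include <stdio.h>

typedef unsigned int block_t;

/* Pretend SIT block -> on-disk address mapping; the jump at index 3
 * models a block that lives in the other SIT copy. */
static block_t sit_addr(int blkno)
{
	static const block_t addrs[] = { 100, 101, 102, 200, 201, 202, 203 };
	return addrs[blkno];
}

/* "Issue" one merged read covering [start_addr, start_addr + cnt). */
static void issue_read(block_t start_addr, int cnt)
{
	printf("read %d block(s) starting at %u\n", cnt, start_addr);
}

/* Read up to nrpages SIT blocks beginning at 'start', merging only the
 * leading run of contiguous addresses; return how many were covered. */
static int ra_sit_blocks(int start, int nrpages, int sit_blk_cnt)
{
	block_t blk_addr, prev_blk_addr = 0;
	int blkno = start;

	for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {
		blk_addr = sit_addr(blkno);
		/* cut the batch once the on-disk addresses stop being contiguous */
		if (blkno != start && prev_blk_addr + 1 != blk_addr)
			break;
		prev_blk_addr = blk_addr;
	}
	issue_read(sit_addr(start), blkno - start);
	return blkno - start;
}

int main(void)
{
	const int sit_blk_cnt = 7;	/* total SIT blocks */
	const int nrpages = 4;		/* max blocks per merged read */
	int start_blk = 0;

	/* Same driving-loop shape as build_sit_entries(): read a batch,
	 * consume 'readed' blocks worth of entries, then continue. */
	do {
		int readed = ra_sit_blocks(start_blk, nrpages, sit_blk_cnt);
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
	return 0;
}

With the sample address table, the sketch issues two merged reads (blocks 100-102, then 200-203) instead of seven single-block reads; ra_sit_pages() in the patch applies the same cut-off, but against the page cache and submitted bios.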
fs/f2fs/segment.c

@@ -14,6 +14,7 @@
 #include <linux/blkdev.h>
 #include <linux/prefetch.h>
 #include <linux/vmalloc.h>
+#include <linux/swap.h>
 
 #include "f2fs.h"
 #include "segment.h"
@@ -1706,19 +1707,64 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 	return restore_curseg_summaries(sbi);
 }
 
+static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages)
+{
+	struct address_space *mapping = sbi->meta_inode->i_mapping;
+	struct page *page;
+	block_t blk_addr, prev_blk_addr = 0;
+	int sit_blk_cnt = SIT_BLK_CNT(sbi);
+	int blkno = start;
+
+	for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {
+
+		blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK);
+
+		if (blkno != start && prev_blk_addr + 1 != blk_addr)
+			break;
+		prev_blk_addr = blk_addr;
+repeat:
+		page = grab_cache_page(mapping, blk_addr);
+		if (!page) {
+			cond_resched();
+			goto repeat;
+		}
+		if (PageUptodate(page)) {
+			mark_page_accessed(page);
+			f2fs_put_page(page, 1);
+			continue;
+		}
+
+		submit_read_page(sbi, page, blk_addr, READ_SYNC);
+
+		mark_page_accessed(page);
+		f2fs_put_page(page, 0);
+	}
+
+	f2fs_submit_read_bio(sbi, READ_SYNC);
+	return blkno - start;
+}
+
 static void build_sit_entries(struct f2fs_sb_info *sbi)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
 	struct f2fs_summary_block *sum = curseg->sum_blk;
-	unsigned int start;
+	int sit_blk_cnt = SIT_BLK_CNT(sbi);
+	unsigned int i, start, end;
+	unsigned int readed, start_blk = 0;
+	int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
 
-	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+	do {
+		readed = ra_sit_pages(sbi, start_blk, nrpages);
+
+		start = start_blk * sit_i->sents_per_block;
+		end = (start_blk + readed) * sit_i->sents_per_block;
+
+		for (; start < end && start < TOTAL_SEGS(sbi); start++) {
 		struct seg_entry *se = &sit_i->sentries[start];
 		struct f2fs_sit_block *sit_blk;
 		struct f2fs_sit_entry sit;
 		struct page *page;
-		int i;
 
 		mutex_lock(&curseg->curseg_mutex);
 		for (i = 0; i < sits_in_cursum(sum); i++) {
@@ -1729,6 +1775,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 			}
 		}
 		mutex_unlock(&curseg->curseg_mutex);
+
 		page = get_current_sit_page(sbi, start);
 		sit_blk = (struct f2fs_sit_block *)page_address(page);
 		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
@@ -1741,6 +1788,8 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 			e->valid_blocks += se->valid_blocks;
 		}
 	}
+	start_blk += readed;
+	} while (start_blk < sit_blk_cnt);
 }
 
 static void init_free_segmap(struct f2fs_sb_info *sbi)
...
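A note on the batch size: nrpages is taken from MAX_BIO_BLOCKS(max_hw_blocks(sbi)), so each ra_sit_pages() pass appears to be sized to what a single bio can carry on the underlying device; build_sit_entries() then parses exactly the 'readed' SIT blocks of that pass before requesting the next batch, stopping once start_blk reaches SIT_BLK_CNT(sbi).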
fs/f2fs/segment.h

@@ -78,6 +78,8 @@
 	(segno / SIT_ENTRY_PER_BLOCK)
 #define START_SEGNO(sit_i, segno)				\
 	(SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define SIT_BLK_CNT(sbi)					\
+	((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
 #define f2fs_bitmap_size(nr)					\
 	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
 #define TOTAL_SEGS(sbi)	(SM_I(sbi)->main_segments)
...
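SIT_BLK_CNT() is a ceiling division over the main segment count. As a rough worked example (assuming SIT_ENTRY_PER_BLOCK is 55, i.e. a 4KB block holding 55 SIT entries), a volume with 1024 main segments needs (1024 + 55 - 1) / 55 = 19 SIT blocks, so the final, partially filled SIT block is still counted and read.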