Commit 275b12bf authored by Wu Fengguang's avatar Wu Fengguang Committed by Linus Torvalds

readahead: return early when readahead is disabled

Reduce readahead overheads by returning early in do_sync_mmap_readahead().

tmpfs has ra_pages=0 and it can page fault really fast (not constrained by
I/O if not swapping).
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Tested-by: Tim Chen <tim.c.chen@intel.com>
Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1495f230
@@ -1556,6 +1556,8 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	/* If we don't want any read-ahead, don't bother */
 	if (VM_RandomReadHint(vma))
 		return;
+	if (!ra->ra_pages)
+		return;
 	if (VM_SequentialReadHint(vma) ||
 	    offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
@@ -1578,12 +1580,10 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	 * mmap read-around
 	 */
 	ra_pages = max_sane_readahead(ra->ra_pages);
-	if (ra_pages) {
-		ra->start = max_t(long, 0, offset - ra_pages/2);
-		ra->size = ra_pages;
-		ra->async_size = 0;
-		ra_submit(ra, mapping, file);
-	}
+	ra->start = max_t(long, 0, offset - ra_pages / 2);
+	ra->size = ra_pages;
+	ra->async_size = 0;
+	ra_submit(ra, mapping, file);
 }
 /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment