Commit 45cac65b authored by Shaohua Li, committed by Linus Torvalds

readahead: fault retry breaks mmap file read random detection

.fault can now retry.  The retry can break the .fault state machine.  In
filemap_fault, if the page is missing, ra->mmap_miss is increased.  On the
second try, since the page is now in the page cache, ra->mmap_miss is
decreased.  Both happen within a single fault, so we cannot detect random
mmap file access.

Add a new flag to indicate that .fault has already been tried once.  On the
second try, skip the ra->mmap_miss decrease.  The filemap_fault state
machine is fine with this.

I only tested x86, not the other archs, but the change for the other archs
looks obvious; still, who knows :)
Signed-off-by: Shaohua Li <shaohua.li@fusionio.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e79bee24
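
To make the broken accounting concrete, here is a minimal userspace model of
the ra->mmap_miss bookkeeping (an illustration, not part of the commit;
fault_once() and its page_cached argument are hypothetical stand-ins for
filemap_fault()'s calls to do_sync_mmap_readahead() and
do_async_mmap_readahead()):

	/*
	 * Sketch of the mmap_miss accounting, assuming the patched logic
	 * below; passing flags == 0 on the retry reproduces the old
	 * behaviour.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define FAULT_FLAG_TRIED 0x40	/* second try, as added by this commit */

	static unsigned int mmap_miss;	/* models ra->mmap_miss */

	static void fault_once(bool page_cached, unsigned int flags)
	{
		if (!page_cached)
			mmap_miss++;	/* do_sync_mmap_readahead(): count a miss */
		else if (!(flags & FAULT_FLAG_TRIED) && mmap_miss > 0)
			mmap_miss--;	/* do_async_mmap_readahead(): count a hit */
	}

	int main(void)
	{
		/* One user fault that retries: miss, then hit on the retry. */
		fault_once(false, 0);
		fault_once(true, 0);	/* old behaviour: retry undoes the miss */
		printf("without FAULT_FLAG_TRIED: mmap_miss = %u\n", mmap_miss);

		mmap_miss = 0;
		fault_once(false, 0);
		fault_once(true, FAULT_FLAG_TRIED);	/* fixed: the miss survives */
		printf("with    FAULT_FLAG_TRIED: mmap_miss = %u\n", mmap_miss);
		return 0;
	}

Without the flag, a single retried fault increments and then decrements
mmap_miss, netting zero, so a workload of pure cache misses never looks
random; with FAULT_FLAG_TRIED set on the second try, each genuine miss is
counted once.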
@@ -336,6 +336,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
@@ -152,6 +152,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 			tsk->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would have
@@ -186,6 +186,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 			tsk->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
@@ -113,6 +113,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
@@ -184,6 +184,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/* No need to up_read(&mm->mmap_sem) as we would
 			 * have already released it in __lock_page_or_retry
@@ -170,6 +170,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
@@ -233,6 +233,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
@@ -171,6 +171,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 		}
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
@@ -183,6 +183,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 			tsk->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/* No need to up_read(&mm->mmap_sem) as we would
 			 * have already released it in __lock_page_or_retry
@@ -451,6 +451,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
@@ -367,6 +367,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			down_read(&mm->mmap_sem);
 			goto retry;
 		}
@@ -504,6 +504,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		}
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
@@ -265,6 +265,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 		}
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/* No need to up_read(&mm->mmap_sem) as we would
 			 * have already released it in __lock_page_or_retry
@@ -452,6 +452,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		}
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/* No need to up_read(&mm->mmap_sem) as we would
 			 * have already released it in __lock_page_or_retry
@@ -454,6 +454,7 @@ static int handle_page_fault(struct pt_regs *regs,
 			tsk->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/*
 			 * No need to up_read(&mm->mmap_sem) as we would
@@ -89,6 +89,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
@@ -1220,6 +1220,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
@@ -126,6 +126,7 @@ void do_page_fault(struct pt_regs *regs)
 			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			/* No need to up_read(&mm->mmap_sem) as we would
 			 * have already released it in __lock_page_or_retry
@@ -161,6 +161,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 #define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
 #define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_TRIED	0x40	/* second try */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -1607,13 +1607,13 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Do we have something in the page cache already?
 	 */
 	page = find_get_page(mapping, offset);
-	if (likely(page)) {
+	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
 		/*
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
 		do_async_mmap_readahead(vma, ra, file, page, offset);
-	} else {
+	} else if (!page) {
 		/* No page in the page cache at all */
 		do_sync_mmap_readahead(vma, ra, file, offset);
 		count_vm_event(PGMAJFAULT);