Commit 33ce6140 authored by Heiko Carstens

[S390] mm: add page fault retry handling

s390 arch backend for d065bd81 "mm: retry page fault when blocking on
disk transfer".
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
parent 99583181
@@ -280,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int fault, write;
+	unsigned int flags;
+	int fault;
 
 	if (notify_page_fault(regs))
 		return 0;
@@ -299,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	flags = FAULT_FLAG_ALLOW_RETRY;
+	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+		flags |= FAULT_FLAG_WRITE;
+retry:
 	down_read(&mm->mmap_sem);
 
 	fault = VM_FAULT_BADMAP;
@@ -328,13 +333,16 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	write = (access == VM_WRITE ||
-		 (trans_exc_code & store_indication) == 0x400) ?
-		FAULT_FLAG_WRITE : 0;
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
@@ -344,6 +352,13 @@ static inline int do_exception(struct pt_regs *regs, int access,
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 				      regs, address);
 		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
 	/*
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment