Commit e040f218 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: copy_pte_range progress fix

My latency breaking in copy_pte_range didn't work as intended: instead of
checking at regularish intervals, after the first interval it checked every
time around the loop, too impatient to be preempted.  Fix that.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 09ad4bbc
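
For context, a minimal userspace sketch of the pattern the patch below restores: do a batch of work, and only every 32 units of progress check whether to drop out of the loop. The helpers should_break() and more_work() are illustrative stand-ins (not kernel APIs) for the need_resched()/need_lockbreak() tests and the pte-copy step; the point is that the counter must be reset when the threshold check fires, otherwise the check runs on every iteration from then on.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins, not kernel functions. */
static bool should_break(void) { return false; }  /* e.g. need_resched() */
static bool more_work(int i)   { return i < 1000; }

int main(void)
{
	int progress = 0;
	int i;

	for (i = 0; more_work(i); i++) {
		if (progress >= 32) {
			progress = 0;	/* reset: next check is 32 units away */
			if (should_break())
				break;	/* drop locks, reschedule, then resume */
		}
		/* ... copy one unit of work here ... */
		progress++;
	}
	printf("did %d units before stopping\n", i);
	return 0;
}
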
mm/memory.c
@@ -410,7 +410,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 {
 	pte_t *src_pte, *dst_pte;
 	unsigned long vm_flags = vma->vm_flags;
-	int progress;
+	int progress = 0;
 
 again:
 	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
@@ -418,17 +418,19 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		return -ENOMEM;
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 
-	progress = 0;
 	spin_lock(&src_mm->page_table_lock);
 	do {
 		/*
 		 * We are holding two locks at this point - either of them
 		 * could generate latencies in another task on another CPU.
 		 */
-		if (progress >= 32 && (need_resched() ||
-		    need_lockbreak(&src_mm->page_table_lock) ||
-		    need_lockbreak(&dst_mm->page_table_lock)))
-			break;
+		if (progress >= 32) {
+			progress = 0;
+			if (need_resched() ||
+			    need_lockbreak(&src_mm->page_table_lock) ||
+			    need_lockbreak(&dst_mm->page_table_lock))
+				break;
+		}
 		if (pte_none(*src_pte)) {
 			progress++;
 			continue;