Commit 0c942a45 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: msync_pte_range progress

Use latency breaking in msync_pte_range like that in copy_pte_range, instead
of the ugly CONFIG_PREEMPT filemap_msync alternatives.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e040f218
mm/msync.c
@@ -26,12 +26,21 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
+	int progress = 0;
 
+again:
 	pte = pte_offset_map(pmd, addr);
 	do {
 		unsigned long pfn;
 		struct page *page;
 
+		if (progress >= 64) {
+			progress = 0;
+			if (need_resched() ||
+			    need_lockbreak(&vma->vm_mm->page_table_lock))
+				break;
+		}
+		progress++;
 		if (!pte_present(*pte))
 			continue;
 		if (!pte_maybe_dirty(*pte))
@@ -46,8 +55,12 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
 		    page_test_and_clear_dirty(page))
 			set_page_dirty(page);
+		progress += 3;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap(pte - 1);
+	cond_resched_lock(&vma->vm_mm->page_table_lock);
+	if (addr != end)
+		goto again;
 }
 
 static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -106,29 +119,6 @@ static void msync_page_range(struct vm_area_struct *vma,
 	spin_unlock(&mm->page_table_lock);
 }
 
-#ifdef CONFIG_PREEMPT
-static inline void filemap_msync(struct vm_area_struct *vma,
-	unsigned long addr, unsigned long end)
-{
-	const size_t chunk = 64 * 1024;	/* bytes */
-	unsigned long next;
-
-	do {
-		next = addr + chunk;
-		if (next > end || next < addr)
-			next = end;
-		msync_page_range(vma, addr, next);
-		cond_resched();
-	} while (addr = next, addr != end);
-}
-#else
-static inline void filemap_msync(struct vm_area_struct *vma,
-	unsigned long addr, unsigned long end)
-{
-	msync_page_range(vma, addr, end);
-}
-#endif
-
 /*
  * MS_SYNC syncs the entire file - including mappings.
  *
@@ -150,7 +140,7 @@ static int msync_interval(struct vm_area_struct *vma,
 		return -EBUSY;
 
 	if (file && (vma->vm_flags & VM_SHARED)) {
-		filemap_msync(vma, addr, end);
+		msync_page_range(vma, addr, end);
 
 		if (flags & MS_SYNC) {
 			struct address_space *mapping = file->f_mapping;
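The shape of the change is easy to lift out of the hunks above: do a bounded batch of work under the lock, drop the lock when someone needs it, then jump back and resume at the same position. Below is a minimal userspace analogue of that pattern using pthreads, offered only as a sketch: every name here (process_range, table_lock, BATCH) is an illustrative stand-in, not kernel API, and the mutex is dropped unconditionally where the kernel's cond_resched_lock() only yields when a reschedule or lock-break is actually pending.

/*
 * Userspace sketch of the lock-break loop added above.
 * Hypothetical names throughout; only the control flow
 * mirrors msync_pte_range().
 */
#include <pthread.h>
#include <stdio.h>

#define NITEMS	1000
#define BATCH	64	/* mirrors the "progress >= 64" threshold */

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int items[NITEMS];

static void process_one(int i)
{
	items[i] ^= 1;			/* stand-in for the per-pte work */
}

static void process_range(int start, int end)
{
	int progress = 0;
	int i = start;

	pthread_mutex_lock(&table_lock);
again:
	while (i < end) {
		if (progress >= BATCH) {
			progress = 0;
			break;		/* give waiters a chance at the lock */
		}
		progress++;
		process_one(i);
		i++;
	}
	/*
	 * Analogue of cond_resched_lock(): drop and retake the lock.
	 * (The kernel version only yields when it actually needs to.)
	 */
	pthread_mutex_unlock(&table_lock);
	pthread_mutex_lock(&table_lock);
	if (i < end)
		goto again;		/* resume where we left off */
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	process_range(0, NITEMS);
	printf("processed %d items in batches of %d\n", NITEMS, BATCH);
	return 0;
}

Note how the patch charges progress += 3 for pages that need a dirty flush, so the batch breaks sooner when the walk is doing real work; the sketch keeps a flat cost of 1 per item for brevity.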