Commit fe7885a4 authored by Linus Torvalds

Merge http://test1.bkbits.net/system-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents d0096496 30746bbd
@@ -49,6 +49,9 @@ changes occur:
 	page table operations such as what happens during
 	fork, and exec.
 
+	Platform developers note that generic code will always
+	invoke this interface without mm->page_table_lock held.
+
 3) void flush_tlb_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
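The note above pins down a locking contract: flush_tlb_mm() may be reached from generic code that does not hold mm->page_table_lock. A minimal port-side sketch of what that implies is below; the helpers (local_flush_tlb_mm, invalidate_mm_context) are hypothetical names, not from this commit:

	/*
	 * Hypothetical sketch: since callers may not hold
	 * mm->page_table_lock, this function may only rely on state it
	 * serializes itself, e.g. a per-mm context tag handled lazily.
	 */
	static inline void flush_tlb_mm(struct mm_struct *mm)
	{
		if (mm == current->active_mm)
			local_flush_tlb_mm(mm);		/* hypothetical helper */
		else
			invalidate_mm_context(mm);	/* hypothetical: new context on next switch */
	}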
@@ -69,6 +72,9 @@ changes occur:
 	call flush_tlb_page (see below) for each entry which may be
 	modified.
 
+	Platform developers note that generic code will always
+	invoke this interface with mm->page_table_lock held.
+
 4) void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 	This time we need to remove the PAGE_SIZE sized translation
@@ -87,6 +93,9 @@ changes occur:
 	This is used primarily during fault processing.
 
+	Platform developers note that generic code will always
+	invoke this interface with mm->page_table_lock held.
+
 5) void flush_tlb_pgtables(struct mm_struct *mm,
 	unsigned long start, unsigned long end)
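The flush_tlb_range() and flush_tlb_page() notes are the other half of the contract: generic code calls them with mm->page_table_lock held, so a port may assume the translations being flushed are stable until the call returns. A rough SMP-flavoured sketch, with hypothetical helper names:

	/*
	 * Sketch only.  The caller holds mm->page_table_lock, so the ptes
	 * covering [start, end) cannot change while the cross-CPU
	 * invalidation is in flight; no extra locking is needed for that.
	 */
	void flush_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;

		local_flush_tlb_range(mm, start, end);		/* hypothetical */
		smp_broadcast_tlb_range(mm, start, end);	/* hypothetical IPI fan-out */
	}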
@@ -77,6 +77,13 @@ The page_table_lock is grabbed while holding the kernel_lock spinning monitor.
 
 The page_table_lock is a spin lock.
 
+Note: PTL can also be used to guarantee that no new clones using the
+mm start up ... this is a loose form of stability on mm_users. For
+example, it is used in copy_mm to protect against a racing tlb_gather_mmu
+single address space optimization, so that the zap_page_range (from
+vmtruncate) does not lose sending ipi's to cloned threads that might
+be spawned underneath it and go to user mode to drag in pte's into tlbs.
+
 swap_list_lock/swap_device_lock
 -------------------------------
 The swap devices are chained in priority order from the "swap_list" header.
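The paragraph above is compressed; the race it refers to is roughly the following. tlb_gather_mmu(), called under the PTL from zap_page_range(), can skip cross-CPU flushes when the mm has a single user. An illustrative simplification of that test is below; the structure and field names (mmu_gathers, need_ipi) are not the exact 2.5 code:

	/*
	 * Illustrative simplification.  zap_page_range() runs with
	 * mm->page_table_lock held, so the mm_users test below cannot be
	 * invalidated by a CLONE_VM clone: copy_mm() bumps mm_users and
	 * then waits on the same lock (see the fork.c hunk below) before
	 * the new thread can reach user mode and load TLB entries.
	 */
	static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm)
	{
		struct mmu_gather *tlb = &mmu_gathers[smp_processor_id()];

		tlb->mm = mm;
		/* single user => no other CPU can hold this mm's translations */
		tlb->need_ipi = (atomic_read(&mm->mm_users) > 1);
		return tlb;
	}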
@@ -367,6 +367,13 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	if (clone_flags & CLONE_VM) {
 		atomic_inc(&oldmm->mm_users);
 		mm = oldmm;
+		/*
+		 * There are cases where the PTL is held to ensure no
+		 * new threads start up in user mode using an mm, which
+		 * allows optimizing out ipis; the tlb_gather_mmu code
+		 * is an example.
+		 */
+		spin_unlock_wait(&oldmm->page_table_lock);
 		goto good_mm;
 	}
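spin_unlock_wait() does not acquire the lock; it only waits for any current holder to release it. That is enough here: a zap_page_range() that sampled mm_users before the atomic_inc() above still holds page_table_lock, so waiting for the lock to drop guarantees its flush has finished before the child can populate TLB entries. Roughly, ignoring memory barriers and architecture details (helper name is illustrative):

	/* Rough equivalent of what copy_mm() relies on, as a sketch: */
	static inline void wait_for_page_table_lock(struct mm_struct *mm)
	{
		while (spin_is_locked(&mm->page_table_lock))
			cpu_relax();	/* do not take the lock, just wait it out */
	}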
@@ -839,8 +839,8 @@ int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsign
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, beg, end);
+	spin_unlock(&mm->page_table_lock);
 	return error;
 }
@@ -922,8 +922,8 @@ int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned lo
 		from = (from + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (from && (from < end));
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, beg, end);
+	spin_unlock(&mm->page_table_lock);
 	return error;
 }
@@ -92,8 +92,8 @@ static void change_protection(struct vm_area_struct *vma, unsigned long start, u
 		start = (start + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (start && (start < end));
-	spin_unlock(&current->mm->page_table_lock);
 	flush_tlb_range(vma, beg, end);
+	spin_unlock(&current->mm->page_table_lock);
 	return;
 }
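The three hunks above (zeromap_page_range, remap_page_range, change_protection) all apply the rule the documentation change introduces: the TLB flush has to happen before page_table_lock is dropped, so flush_tlb_range() runs with the lock held and the page tables it covers cannot change underneath it. Schematically (not literal kernel code):

	spin_lock(&mm->page_table_lock);
	/* ... walk and modify the page tables for [beg, end) ... */
	flush_tlb_range(vma, beg, end);		/* still under page_table_lock */
	spin_unlock(&mm->page_table_lock);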
@@ -81,8 +81,9 @@ static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
 	return error;
 }
 
-static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
+static int move_one_page(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int error = 0;
 	pte_t *src, *dst;
@@ -94,6 +95,7 @@ static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned
 		pte_unmap_nested(src);
 		pte_unmap(dst);
 	}
+	flush_tlb_page(vma, old_addr);
 	spin_unlock(&mm->page_table_lock);
 	return error;
 }
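With the vma passed down, move_one_page() can flush the stale translation for old_addr itself, per page and still under page_table_lock, instead of relying on a later flush_tlb_range() in the caller. A simplified shape of the function after this change (details elided):

	static int move_one_page(struct vm_area_struct *vma,
				 unsigned long old_addr, unsigned long new_addr)
	{
		struct mm_struct *mm = vma->vm_mm;
		int error = 0;

		spin_lock(&mm->page_table_lock);
		/* ... move the pte from old_addr to new_addr, or set error ... */
		flush_tlb_page(vma, old_addr);		/* under page_table_lock */
		spin_unlock(&mm->page_table_lock);
		return error;
	}

This is also why the flush_tlb_range() call in move_page_tables() goes away in the next hunk.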
@@ -113,10 +115,9 @@ static int move_page_tables(struct vm_area_struct *vma,
 	 */
 	while (offset) {
 		offset -= PAGE_SIZE;
-		if (move_one_page(mm, old_addr + offset, new_addr + offset))
+		if (move_one_page(vma, old_addr + offset, new_addr + offset))
 			goto oops_we_failed;
 	}
-	flush_tlb_range(vma, old_addr, old_addr + len);
 	return 0;
 
 	/*
@@ -129,7 +130,7 @@ static int move_page_tables(struct vm_area_struct *vma,
 oops_we_failed:
 	flush_cache_range(vma, new_addr, new_addr + len);
 	while ((offset += PAGE_SIZE) < len)
-		move_one_page(mm, new_addr + offset, old_addr + offset);
+		move_one_page(vma, new_addr + offset, old_addr + offset);
 	zap_page_range(vma, new_addr, len);
 	return -1;
 }