Commit 123e4df7 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap 9 remove pte_chains

From: Hugh Dickins <hugh@veritas.com>

Lots of deletions: the next patch will put in the new anon rmap, which
should look clearer if we first remove all of the old pte-pointer-based
rmap from the core in this patch - which therefore leaves anonymous rmap
totally disabled, and anon pages locked in memory until the process frees
them.

Leave arch files (and page table rmap) untouched for now, clean them up in
a later batch.  A few constructive changes amidst all the deletions:

Choose names (e.g.  page_add_anon_rmap) and args (e.g.  no more pteps) now
so we need not revisit so many files in the next patch.  Inline function
page_dup_rmap for fork's copy_page_range simply bumps mapcount under lock.
Add a cond_resched_lock in copy_page_range.  Struct page rearranged: no pte
union, just mapcount moved next to atomic count, so two ints can occupy one
long on 64-bit; i386 struct page now 32 bytes even with PAE.  Never pass
PageReserved to page_remove_rmap, only do_wp_page did so.
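
For orientation, here is the shape of the change as a caller sees it - a
condensed sketch drawn from the do_anonymous_page and do_swap_page hunks
below, with error paths omitted, not a verbatim quote of either:

	/* Before: preallocate a pte_chain outside the lock, pass the pte in */
	struct pte_chain *pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		return VM_FAULT_OOM;
	spin_lock(&mm->page_table_lock);
	set_pte(page_table, entry);
	pte_chain = page_add_rmap(page, page_table, pte_chain);
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);

	/* After: no allocation, no pte pointer - just the mm and address */
	spin_lock(&mm->page_table_lock);
	set_pte(page_table, entry);
	page_add_anon_rmap(page, mm, address);
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);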


From: Hugh Dickins <hugh@veritas.com>

  Move page_add_anon_rmap's BUG_ON(page_mapping(page)) inside the rmap_lock
  (well, might as well just check mapping if !mapcount then): if this page is
  being mapped or unmapped on another cpu at the same time, page_mapping's
  PageAnon(page) and page->mapping are volatile.

  But page_mapping(page) is used more widely: I've a nasty feeling that
  clear_page_anon, page_add_anon_rmap and/or page_mapping need barriers added
  (also in 2.6.6 itself).
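
  A sketch of what that reordering could look like, using the
  page_map_lock/page_map_unlock helpers declared in the rmap.h hunk below
  (the rmap.c body itself is collapsed in this view, so the details here
  are illustrative rather than the exact code):

	void fastcall page_add_anon_rmap(struct page *page,
		struct mm_struct *mm, unsigned long addr)
	{
		/* mm and addr are taken now but not yet used: the real
		 * anon rmap arrives in the next patch of the series */
		page_map_lock(page);
		if (!page->mapcount) {
			/* only worth checking before the first mapping, and
			 * only stable against a concurrent map/unmap on
			 * another cpu once the rmap lock is held */
			BUG_ON(page->mapping);
		}
		page->mapcount++;
		page_map_unlock(page);
	}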
parent b33a7bad
......@@ -293,53 +293,42 @@ EXPORT_SYMBOL(copy_strings_kernel);
* This routine is used to map in a page into an address space: needed by
* execve() for the initial stack and environment pages.
*
* tsk->mmap_sem is held for writing.
* tsk->mm->mmap_sem is held for writing.
*/
void put_dirty_page(struct task_struct *tsk, struct page *page,
unsigned long address, pgprot_t prot)
{
struct mm_struct *mm = tsk->mm;
pgd_t * pgd;
pmd_t * pmd;
pte_t * pte;
struct pte_chain *pte_chain;
if (page_count(page) != 1)
printk(KERN_ERR "mem_map disagrees with %p at %08lx\n",
page, address);
pgd = pgd_offset(tsk->mm, address);
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain)
goto out_sig;
spin_lock(&tsk->mm->page_table_lock);
pmd = pmd_alloc(tsk->mm, pgd, address);
pgd = pgd_offset(mm, address);
spin_lock(&mm->page_table_lock);
pmd = pmd_alloc(mm, pgd, address);
if (!pmd)
goto out;
pte = pte_alloc_map(tsk->mm, pmd, address);
pte = pte_alloc_map(mm, pmd, address);
if (!pte)
goto out;
if (!pte_none(*pte)) {
pte_unmap(pte);
goto out;
}
mm->rss++;
lru_cache_add_active(page);
flush_dcache_page(page);
set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
pte_chain = page_add_rmap(page, pte, pte_chain);
page_add_anon_rmap(page, mm, address);
pte_unmap(pte);
tsk->mm->rss++;
spin_unlock(&tsk->mm->page_table_lock);
spin_unlock(&mm->page_table_lock);
/* no need for flush_tlb */
pte_chain_free(pte_chain);
return;
out:
spin_unlock(&tsk->mm->page_table_lock);
out_sig:
spin_unlock(&mm->page_table_lock);
__free_page(page);
force_sig(SIGKILL, tsk);
pte_chain_free(pte_chain);
return;
}
int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
......
......@@ -147,8 +147,6 @@ struct vm_operations_struct {
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
};
/* forward declaration; pte_chain is meant to be internal to rmap.c */
struct pte_chain;
struct mmu_gather;
struct inode;
......@@ -170,28 +168,26 @@ typedef unsigned long page_flags_t;
*
* The first line is data used in page cache lookup, the second line
* is used for linear searches (eg. clock algorithm scans).
*
* TODO: make this structure smaller, it could be as small as 32 bytes.
*/
struct page {
page_flags_t flags; /* atomic flags, some possibly
updated asynchronously */
page_flags_t flags; /* Atomic flags, some possibly
* updated asynchronously */
atomic_t _count; /* Usage count, see below. */
struct address_space *mapping; /* The inode (or ...) we belong to. */
pgoff_t index; /* Our offset within mapping. */
struct list_head lru; /* Pageout list, eg. active_list;
protected by zone->lru_lock !! */
union {
struct pte_chain *chain;/* Reverse pte mapping pointer.
* protected by PG_chainlock */
pte_addr_t direct;
unsigned int mapcount; /* Count ptes mapped into mms */
} pte;
unsigned int mapcount; /* Count of ptes mapped in mms,
* to show when page is mapped
* & limit reverse map searches,
* protected by PG_maplock.
*/
unsigned long private; /* Mapping-private opaque data:
* usually used for buffer_heads
* if PagePrivate set; used for
* swp_entry_t if PageSwapCache
*/
struct address_space *mapping; /* The inode (or ...) we belong to. */
pgoff_t index; /* Our offset within mapping. */
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/
/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
......@@ -440,13 +436,11 @@ static inline pgoff_t page_index(struct page *page)
}
/*
* Return true if this page is mapped into pagetables. Subtle: test pte.direct
* rather than pte.chain. Because sometimes pte.direct is 64-bit, and .chain
* is only 32-bit.
* Return true if this page is mapped into pagetables.
*/
static inline int page_mapped(struct page *page)
{
return page->pte.direct != 0;
return page->mapcount != 0;
}
/*
......
......@@ -71,12 +71,12 @@
#define PG_nosave 14 /* Used for system suspend/resume */
#define PG_maplock 15 /* Lock bit for rmap to ptes */
#define PG_direct 16 /* ->pte_chain points directly at pte */
#define PG_swapcache 16 /* Swap page: swp_entry_t in private */
#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */
#define PG_reclaim 18 /* To be reclaimed asap */
#define PG_compound 19 /* Part of a compound page */
#define PG_anon 20 /* Anonymous page: anon_vma in mapping*/
#define PG_swapcache 21 /* Swap page: swp_entry_t in private */
#define PG_anon 20 /* Anonymous page: anonmm in mapping */
/*
......@@ -281,12 +281,6 @@ extern void get_full_page_state(struct page_state *ret);
#define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags)
#define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags)
#define PageDirect(page) test_bit(PG_direct, &(page)->flags)
#define SetPageDirect(page) set_bit(PG_direct, &(page)->flags)
#define TestSetPageDirect(page) test_and_set_bit(PG_direct, &(page)->flags)
#define ClearPageDirect(page) clear_bit(PG_direct, &(page)->flags)
#define TestClearPageDirect(page) test_and_clear_bit(PG_direct, &(page)->flags)
#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
......
......@@ -15,21 +15,25 @@
#ifdef CONFIG_MMU
struct pte_chain;
struct pte_chain *pte_chain_alloc(int gfp_flags);
void __pte_chain_free(struct pte_chain *pte_chain);
static inline void pte_chain_free(struct pte_chain *pte_chain)
void fastcall page_add_anon_rmap(struct page *,
struct mm_struct *, unsigned long addr);
void fastcall page_add_file_rmap(struct page *);
void fastcall page_remove_rmap(struct page *);
/**
* page_dup_rmap - duplicate pte mapping to a page
* @page: the page to add the mapping to
*
* For copy_page_range only: minimal extract from page_add_rmap,
* avoiding unnecessary tests (already checked) so it's quicker.
*/
static inline void page_dup_rmap(struct page *page)
{
if (pte_chain)
__pte_chain_free(pte_chain);
page_map_lock(page);
page->mapcount++;
page_map_unlock(page);
}
struct pte_chain * fastcall
page_add_rmap(struct page *, pte_t *, struct pte_chain *);
void fastcall page_add_file_rmap(struct page *);
void fastcall page_remove_rmap(struct page *, pte_t *);
/*
* Called from mm/vmscan.c to handle paging out
*/
......
......@@ -84,7 +84,6 @@ extern void signals_init(void);
extern void buffer_init(void);
extern void pidhash_init(void);
extern void pidmap_init(void);
extern void pte_chain_init(void);
extern void radix_tree_init(void);
extern void free_initmem(void);
extern void populate_rootfs(void);
......@@ -460,7 +459,6 @@ asmlinkage void __init start_kernel(void)
calibrate_delay();
pidmap_init();
pgtable_cache_init();
pte_chain_init();
#ifdef CONFIG_X86
if (efi_enabled)
efi_enter_virtual_mode();
......
......@@ -36,7 +36,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
if (!PageReserved(page)) {
if (pte_dirty(pte))
set_page_dirty(page);
page_remove_rmap(page, ptep);
page_remove_rmap(page);
page_cache_release(page);
mm->rss--;
}
......
......@@ -217,20 +217,10 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
unsigned long address = vma->vm_start;
unsigned long end = vma->vm_end;
unsigned long cow;
struct pte_chain *pte_chain = NULL;
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst, src, vma);
pte_chain = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN);
if (!pte_chain) {
spin_unlock(&dst->page_table_lock);
pte_chain = pte_chain_alloc(GFP_KERNEL);
spin_lock(&dst->page_table_lock);
if (!pte_chain)
goto nomem;
}
cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
src_pgd = pgd_offset(src, address)-1;
dst_pgd = pgd_offset(dst, address)-1;
......@@ -329,35 +319,8 @@ skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
pte = pte_mkold(pte);
get_page(page);
dst->rss++;
set_pte(dst_pte, pte);
if (PageAnon(page))
pte_chain = page_add_rmap(page,
dst_pte, pte_chain);
else
page_add_file_rmap(page);
if (pte_chain)
goto cont_copy_pte_range_noset;
pte_chain = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN);
if (pte_chain)
goto cont_copy_pte_range_noset;
/*
* pte_chain allocation failed, and we need to
* run page reclaim.
*/
pte_unmap_nested(src_pte);
pte_unmap(dst_pte);
spin_unlock(&src->page_table_lock);
spin_unlock(&dst->page_table_lock);
pte_chain = pte_chain_alloc(GFP_KERNEL);
spin_lock(&dst->page_table_lock);
if (!pte_chain)
goto nomem;
spin_lock(&src->page_table_lock);
dst_pte = pte_offset_map(dst_pmd, address);
src_pte = pte_offset_map_nested(src_pmd,
address);
page_dup_rmap(page);
cont_copy_pte_range_noset:
address += PAGE_SIZE;
if (address >= end) {
......@@ -371,7 +334,7 @@ skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
pte_unmap_nested(src_pte-1);
pte_unmap(dst_pte-1);
spin_unlock(&src->page_table_lock);
cond_resched_lock(&dst->page_table_lock);
cont_copy_pmd_range:
src_pmd++;
dst_pmd++;
......@@ -380,10 +343,8 @@ skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
out_unlock:
spin_unlock(&src->page_table_lock);
out:
pte_chain_free(pte_chain);
return 0;
nomem:
pte_chain_free(pte_chain);
return -ENOMEM;
}
......@@ -449,7 +410,7 @@ static void zap_pte_range(struct mmu_gather *tlb,
if (pte_young(pte) && page_mapping(page))
mark_page_accessed(page);
tlb->freed++;
page_remove_rmap(page, ptep);
page_remove_rmap(page);
tlb_remove_page(tlb, page);
continue;
}
......@@ -1073,7 +1034,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
{
struct page *old_page, *new_page;
unsigned long pfn = pte_pfn(pte);
struct pte_chain *pte_chain;
pte_t entry;
if (unlikely(!pfn_valid(pfn))) {
......@@ -1112,9 +1072,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
page_cache_get(old_page);
spin_unlock(&mm->page_table_lock);
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain)
goto no_pte_chain;
new_page = alloc_page(GFP_HIGHUSER);
if (!new_page)
goto no_new_page;
......@@ -1128,10 +1085,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
if (pte_same(*page_table, pte)) {
if (PageReserved(old_page))
++mm->rss;
page_remove_rmap(old_page, page_table);
else
page_remove_rmap(old_page);
break_cow(vma, new_page, address, page_table);
pte_chain = page_add_rmap(new_page, page_table, pte_chain);
lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, mm, address);
/* Free the old page.. */
new_page = old_page;
......@@ -1140,12 +1098,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
page_cache_release(new_page);
page_cache_release(old_page);
spin_unlock(&mm->page_table_lock);
pte_chain_free(pte_chain);
return VM_FAULT_MINOR;
no_new_page:
pte_chain_free(pte_chain);
no_pte_chain:
page_cache_release(old_page);
return VM_FAULT_OOM;
}
......@@ -1317,7 +1272,6 @@ static int do_swap_page(struct mm_struct * mm,
swp_entry_t entry = pte_to_swp_entry(orig_pte);
pte_t pte;
int ret = VM_FAULT_MINOR;
struct pte_chain *pte_chain = NULL;
pte_unmap(page_table);
spin_unlock(&mm->page_table_lock);
......@@ -1347,11 +1301,6 @@ static int do_swap_page(struct mm_struct * mm,
}
mark_page_accessed(page);
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain) {
ret = VM_FAULT_OOM;
goto out;
}
lock_page(page);
/*
......@@ -1383,14 +1332,13 @@ static int do_swap_page(struct mm_struct * mm,
flush_icache_page(vma, page);
set_pte(page_table, pte);
pte_chain = page_add_rmap(page, page_table, pte_chain);
page_add_anon_rmap(page, mm, address);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, pte);
pte_unmap(page_table);
spin_unlock(&mm->page_table_lock);
out:
pte_chain_free(pte_chain);
return ret;
}
......@@ -1406,19 +1354,6 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
pte_t entry;
struct page * page = ZERO_PAGE(addr);
struct pte_chain *pte_chain;
int ret;
pte_chain = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN);
if (!pte_chain) {
pte_unmap(page_table);
spin_unlock(&mm->page_table_lock);
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain)
goto no_mem;
spin_lock(&mm->page_table_lock);
page_table = pte_offset_map(pmd, addr);
}
/* Read-only mapping of ZERO_PAGE. */
entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
......@@ -1441,7 +1376,6 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap(page_table);
page_cache_release(page);
spin_unlock(&mm->page_table_lock);
ret = VM_FAULT_MINOR;
goto out;
}
mm->rss++;
......@@ -1450,24 +1384,19 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
vma);
lru_cache_add_active(page);
mark_page_accessed(page);
page_add_anon_rmap(page, mm, addr);
}
set_pte(page_table, entry);
/* ignores ZERO_PAGE */
pte_chain = page_add_rmap(page, page_table, pte_chain);
pte_unmap(page_table);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, entry);
spin_unlock(&mm->page_table_lock);
ret = VM_FAULT_MINOR;
goto out;
no_mem:
ret = VM_FAULT_OOM;
out:
pte_chain_free(pte_chain);
return ret;
return VM_FAULT_MINOR;
no_mem:
return VM_FAULT_OOM;
}
/*
......@@ -1489,7 +1418,6 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page * new_page;
struct address_space *mapping = NULL;
pte_t entry;
struct pte_chain *pte_chain;
int sequence = 0;
int ret = VM_FAULT_MINOR;
int anon = 0;
......@@ -1514,10 +1442,6 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (new_page == NOPAGE_OOM)
return VM_FAULT_OOM;
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain)
goto oom;
/*
* Should we do an early C-O-W break?
*/
......@@ -1542,7 +1466,6 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
sequence = atomic_read(&mapping->truncate_count);
spin_unlock(&mm->page_table_lock);
page_cache_release(new_page);
pte_chain_free(pte_chain);
goto retry;
}
page_table = pte_offset_map(pmd, address);
......@@ -1568,8 +1491,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
set_pte(page_table, entry);
if (anon) {
lru_cache_add_active(new_page);
pte_chain = page_add_rmap(new_page,
page_table, pte_chain);
page_add_anon_rmap(new_page, mm, address);
} else
page_add_file_rmap(new_page);
pte_unmap(page_table);
......@@ -1589,7 +1511,6 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(new_page);
ret = VM_FAULT_OOM;
out:
pte_chain_free(pte_chain);
return ret;
}
......
......@@ -81,7 +81,7 @@ static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
static void
copy_one_pte(struct vm_area_struct *vma, unsigned long old_addr,
pte_t *src, pte_t *dst, struct pte_chain **pte_chainp)
unsigned long new_addr, pte_t *src, pte_t *dst)
{
pte_t pte = ptep_clear_flush(vma, old_addr, src);
set_pte(dst, pte);
......@@ -91,8 +91,8 @@ copy_one_pte(struct vm_area_struct *vma, unsigned long old_addr,
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (PageAnon(page)) {
page_remove_rmap(page, src);
*pte_chainp = page_add_rmap(page, dst, *pte_chainp);
page_remove_rmap(page);
page_add_anon_rmap(page, vma->vm_mm, new_addr);
}
}
}
......@@ -105,13 +105,7 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
struct mm_struct *mm = vma->vm_mm;
int error = 0;
pte_t *src, *dst;
struct pte_chain *pte_chain;
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain) {
error = -ENOMEM;
goto out;
}
spin_lock(&mm->page_table_lock);
src = get_one_pte_map_nested(mm, old_addr);
if (src) {
......@@ -133,8 +127,7 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
*/
if (src) {
if (dst)
copy_one_pte(vma, old_addr, src,
dst, &pte_chain);
copy_one_pte(vma, old_addr, new_addr, src, dst);
else
error = -ENOMEM;
pte_unmap_nested(src);
......@@ -143,8 +136,6 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
pte_unmap(dst);
}
spin_unlock(&mm->page_table_lock);
pte_chain_free(pte_chain);
out:
return error;
}
......
......@@ -572,10 +572,6 @@ unsigned long get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
}
void pte_chain_init(void)
{
}
void swap_unplug_io_fn(struct backing_dev_info *)
{
}
......@@ -427,19 +427,19 @@ void free_swap_and_cache(swp_entry_t entry)
/* vma->vm_mm->page_table_lock is held */
static void
unuse_pte(struct vm_area_struct *vma, unsigned long address, pte_t *dir,
swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp)
swp_entry_t entry, struct page *page)
{
vma->vm_mm->rss++;
get_page(page);
set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
*pte_chainp = page_add_rmap(page, dir, *pte_chainp);
page_add_anon_rmap(page, vma->vm_mm, address);
swap_free(entry);
}
/* vma->vm_mm->page_table_lock is held */
static int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
unsigned long address, unsigned long size, unsigned long offset,
swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp)
swp_entry_t entry, struct page *page)
{
pte_t * pte;
unsigned long end;
......@@ -464,8 +464,7 @@ static int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
* Test inline before going to call unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
unuse_pte(vma, offset + address, pte,
entry, page, pte_chainp);
unuse_pte(vma, offset + address, pte, entry, page);
pte_unmap(pte);
return 1;
}
......@@ -479,7 +478,7 @@ static int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
/* vma->vm_mm->page_table_lock is held */
static int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
unsigned long address, unsigned long size,
swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp)
swp_entry_t entry, struct page *page)
{
pmd_t * pmd;
unsigned long offset, end;
......@@ -501,7 +500,7 @@ static int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
BUG();
do {
if (unuse_pmd(vma, pmd, address, end - address,
offset, entry, page, pte_chainp))
offset, entry, page))
return 1;
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
......@@ -511,15 +510,14 @@ static int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
/* vma->vm_mm->page_table_lock is held */
static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
swp_entry_t entry, struct page *page, struct pte_chain **pte_chainp)
swp_entry_t entry, struct page *page)
{
unsigned long start = vma->vm_start, end = vma->vm_end;
if (start >= end)
BUG();
do {
if (unuse_pgd(vma, pgdir, start, end - start,
entry, page, pte_chainp))
if (unuse_pgd(vma, pgdir, start, end - start, entry, page))
return 1;
start = (start + PGDIR_SIZE) & PGDIR_MASK;
pgdir++;
......@@ -531,11 +529,6 @@ static int unuse_process(struct mm_struct * mm,
swp_entry_t entry, struct page* page)
{
struct vm_area_struct* vma;
struct pte_chain *pte_chain;
pte_chain = pte_chain_alloc(GFP_KERNEL);
if (!pte_chain)
return -ENOMEM;
/*
* Go through process' page directory.
......@@ -543,11 +536,10 @@ static int unuse_process(struct mm_struct * mm,
spin_lock(&mm->page_table_lock);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
pgd_t * pgd = pgd_offset(mm, vma->vm_start);
if (unuse_vma(vma, pgd, entry, page, &pte_chain))
if (unuse_vma(vma, pgd, entry, page))
break;
}
spin_unlock(&mm->page_table_lock);
pte_chain_free(pte_chain);
return 0;
}
......