Commit e9d00e5c authored by Linus Torvalds, committed by Linus Torvalds

This improves on the page table TLB shootdown. Almost there.

parent 7c9d187e
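The hunks below rework the generic mmu_gather_t so it batches struct page pointers instead of raw ptes, and thread the gather context through clear_page_tables() and free_pgtables(). As a rough sketch of the caller-side flow this implies (not a verbatim excerpt: the vma walk, fixups and error handling are elided, and the tlb_gather_mmu()/unmap_page_range() call sites are assumed from the declarations shown further down), an unmap path now drives the gather like this:

	/* Sketch only: caller-side flow implied by the do_munmap()/exit_mmap()
	 * hunks below.  Pages are queued while ptes are cleared and are only
	 * freed after tlb_finish_mmu() has flushed the TLBs. */
	mmu_gather_t *tlb;

	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm);			/* start a shootdown batch */
	unmap_page_range(tlb, vma, addr, len);		/* clear ptes, queue pages */
	free_pgtables(tlb, prev, addr, addr + len);	/* queue page-table pages too */
	tlb_finish_mmu(tlb, addr, addr + len);		/* flush, then free everything */
	spin_unlock(&mm->page_table_lock);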
@@ -2153,11 +2153,6 @@ static void __init init_intel(struct cpuinfo_x86 *c)
strcpy(c->x86_model_id, p);
#ifdef CONFIG_SMP
/* PGE CPUID bug: Pentium4 supports PGE, but seems to have SMP bugs.. */
if ( c->x86 == 15 )
clear_bit(X86_FEATURE_PGE, c->x86_capability);
if (test_bit(X86_FEATURE_HT, c->x86_capability)) {
extern int phys_proc_id[NR_CPUS];
@@ -28,8 +28,7 @@ typedef struct free_pte_ctx {
struct mm_struct *mm;
unsigned long nr; /* set to ~0UL means fast mode */
unsigned long freed;
unsigned long start_addr, end_addr;
pte_t ptes[FREE_PTE_NR];
struct page * pages[FREE_PTE_NR];
} mmu_gather_t;
/* Users of the generic TLB shootdown code must declare this storage space. */
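Reassembled from the hunk above (treating the unmarked lines as the post-patch version; field layout and blank lines assumed), the gather structure now carries page pointers rather than ptes and no longer tracks start/end addresses:

	typedef struct free_pte_ctx {
		struct mm_struct	*mm;
		unsigned long		nr;	/* set to ~0UL means fast mode */
		unsigned long		freed;
		struct page		*pages[FREE_PTE_NR];
	} mmu_gather_t;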
@@ -55,20 +54,15 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
unsigned long i, nr;
unsigned long nr;
/* Handle the fast case first. */
if (tlb->nr == ~0UL) {
flush_tlb_mm(tlb->mm);
return;
}
flush_tlb_mm(tlb->mm);
nr = tlb->nr;
tlb->nr = 0;
if (nr)
flush_tlb_mm(tlb->mm);
for (i=0; i < nr; i++) {
pte_t pte = tlb->ptes[i];
__free_pte(pte);
if (nr != ~0UL) {
unsigned long i;
tlb->nr = 0;
for (i=0; i < nr; i++)
free_page_and_swap_cache(tlb->pages[i]);
}
}
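Reading the unmarked lines of this hunk as the new code and restoring indentation, tlb_flush_mmu() after the patch comes out roughly as follows; the TLB flush now happens unconditionally, and before any queued page is released:

	static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
	{
		unsigned long nr;

		flush_tlb_mm(tlb->mm);		/* flush first, free afterwards */
		nr = tlb->nr;
		if (nr != ~0UL) {		/* skip the queue in fast mode */
			unsigned long i;

			tlb->nr = 0;
			for (i=0; i < nr; i++)
				free_page_and_swap_cache(tlb->pages[i]);
		}
	}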
@@ -85,7 +79,6 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsign
if (rss < freed)
freed = rss;
mm->rss = rss - freed;
tlb_flush_mmu(tlb, start, end);
}
@@ -95,29 +88,16 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsign
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs.
*/
static inline void tlb_remove_page(mmu_gather_t *tlb, pte_t *pte, unsigned long addr)
static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
{
struct page *page;
unsigned long pfn = pte_pfn(*pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!PageReserved(page))
tlb->freed++;
}
/* Handle the common case fast, first. */
if (tlb->nr == ~0UL) {
__free_pte(*pte);
pte_clear(pte);
free_page_and_swap_cache(page);
return;
}
if (!tlb->nr)
tlb->start_addr = addr;
tlb->ptes[tlb->nr++] = ptep_get_and_clear(pte);
tlb->end_addr = addr + PAGE_SIZE;
tlb->pages[tlb->nr++] = page;
if (tlb->nr >= FREE_PTE_NR)
tlb_finish_mmu(tlb, 0, 0);
tlb_flush_mmu(tlb, 0, 0);
}
#endif /* _ASM_GENERIC__TLB_H */
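Put back together the same way (unmarked lines read as the new code), tlb_remove_page() now takes the struct page directly: in fast mode the page is freed immediately, otherwise it is queued and the batch is flushed once FREE_PTE_NR entries accumulate. A sketch of the post-patch function, reassembled from the hunk above:

	static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
	{
		/* Handle the common case fast, first. */
		if (tlb->nr == ~0UL) {
			free_page_and_swap_cache(page);
			return;
		}
		tlb->pages[tlb->nr++] = page;	/* queue for freeing after the flush */
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb, 0, 0);
	}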
@@ -35,6 +35,9 @@ static inline void pte_free(struct page *pte)
__free_page(pte);
}
#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
@@ -43,6 +46,7 @@ static inline void pte_free(struct page *pte)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define check_pgt_cache() do { } while (0)
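The new pte_free_tlb()/pmd_free_tlb() macros give the page-table teardown path a gather-aware way to drop page-table pages (their call sites are in the memory.c hunks further down). A minimal illustration of the intended difference, assuming the tlb context is already in hand; pmd_free_tlb() stays a no-op here because the one-entry pmd is folded into the pgd:

	/* Before: the pte page went straight back to the allocator. */
	pte_free(pte);			/* __free_page(pte) right away */

	/* After: the page is queued on the gather and only freed once
	 * tlb_flush_mmu() has flushed the other CPUs' TLBs. */
	pte_free_tlb(tlb, pte);		/* expands to tlb_remove_page((tlb),(pte)) */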
@@ -311,8 +311,6 @@ extern mem_map_t * mem_map;
extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);
extern void clear_page_tables(struct mm_struct *, unsigned long, int);
extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
struct file *shmem_file_setup(char * name, loff_t size);
@@ -71,29 +71,11 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
mem_map_t * mem_map;
/*
* Called by TLB shootdown
*/
void __free_pte(pte_t pte)
{
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
if (PageReserved(page))
return;
if (pte_dirty(pte))
set_page_dirty(page);
free_page_and_swap_cache(page);
}
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
static inline void free_one_pmd(pmd_t * dir)
static inline void free_one_pmd(mmu_gather_t *tlb, pmd_t * dir)
{
struct page *pte;
@@ -106,10 +88,10 @@ static inline void free_one_pmd(pmd_t * dir)
}
pte = pmd_page(*dir);
pmd_clear(dir);
pte_free(pte);
pte_free_tlb(tlb, pte);
}
static inline void free_one_pgd(pgd_t * dir)
static inline void free_one_pgd(mmu_gather_t *tlb, pgd_t * dir)
{
int j;
pmd_t * pmd;
@@ -125,9 +107,9 @@ static inline void free_one_pgd(pgd_t * dir)
pgd_clear(dir);
for (j = 0; j < PTRS_PER_PMD ; j++) {
prefetchw(pmd+j+(PREFETCH_STRIDE/16));
free_one_pmd(pmd+j);
free_one_pmd(tlb, pmd+j);
}
pmd_free(pmd);
pmd_free_tlb(tlb, pmd);
}
/*
@@ -136,13 +118,13 @@ static inline void free_one_pgd(pgd_t * dir)
*
* Must be called with pagetable lock held.
*/
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
void clear_page_tables(mmu_gather_t *tlb, unsigned long first, int nr)
{
pgd_t * page_dir = mm->pgd;
pgd_t * page_dir = tlb->mm->pgd;
page_dir += first;
do {
free_one_pgd(page_dir);
free_one_pgd(tlb, page_dir);
page_dir++;
} while (--nr);
@@ -362,8 +344,18 @@ static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address,
if (pte_none(pte))
continue;
if (pte_present(pte)) {
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
unsigned long pfn = pte_pfn(pte);
pte_clear(ptep);
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page)) {
if (pte_dirty(pte))
set_page_dirty(page);
tlb_remove_page(tlb, page);
}
}
} else {
free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear(ptep);
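Reassembled from this hunk (unmarked lines read as the new code, indentation restored, the repeated pfn assignment kept as shown), the pte_present() branch of zap_pte_range() now does the pfn/PageReserved checks and dirty propagation itself and hands the page to the gather instead of freeing it on the spot:

	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		pte_clear(ptep);
		pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!PageReserved(page)) {
				if (pte_dirty(pte))
					set_page_dirty(page);
				tlb_remove_page(tlb, page);	/* queue, free after flush */
			}
		}
	} else {
		free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(ptep);
	}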
@@ -20,6 +20,7 @@
#include <asm/tlb.h>
extern void unmap_page_range(mmu_gather_t *,struct vm_area_struct *vma, unsigned long address, unsigned long size);
extern void clear_page_tables(mmu_gather_t *tlb, unsigned long first, int nr);
/*
* WARNING: the debugging will use recursive algorithms so never enable this
@@ -835,12 +836,13 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
* "prev", if it exists, points to a vma before the one
* we just free'd - but there's no telling how much before.
*/
static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
static void free_pgtables(mmu_gather_t *tlb, struct vm_area_struct *prev,
unsigned long start, unsigned long end)
{
unsigned long first = start & PGDIR_MASK;
unsigned long last = end + PGDIR_SIZE - 1;
unsigned long start_index, end_index;
struct mm_struct *mm = tlb->mm;
if (!prev) {
prev = mm->mmap;
@@ -875,7 +877,7 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
start_index = pgd_index(first);
end_index = pgd_index(last);
if (end_index > start_index) {
clear_page_tables(mm, start_index, end_index - start_index);
clear_page_tables(tlb, start_index, end_index - start_index);
flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
}
}
@@ -974,7 +976,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
if (extra)
kmem_cache_free(vm_area_cachep, extra);
free_pgtables(mm, prev, addr, addr+len);
free_pgtables(tlb, prev, addr, addr+len);
tlb_finish_mmu(tlb, addr, addr+len);
spin_unlock(&mm->page_table_lock);
@@ -1130,7 +1132,7 @@ void exit_mmap(struct mm_struct * mm)
if (mm->map_count)
BUG();
clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
tlb_finish_mmu(tlb, FIRST_USER_PGD_NR*PGDIR_SIZE, USER_PTRS_PER_PGD*PGDIR_SIZE);
spin_unlock(&mm->page_table_lock);
}