Commit e9d00e5c authored by Linus Torvalds, committed by Linus Torvalds

This improves on the page table TLB shootdown. Almost there.

parent 7c9d187e
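In short: the mmu_gather batch no longer records pte_t values to be passed to __free_pte(); it records struct page pointers, the pte is cleared (and its dirty bit propagated) at zap time in zap_pte_range(), and tlb_flush_mmu() flushes the TLB once before freeing the batched pages, so no other CPU can still reach a page through a stale TLB entry after it has been reused. Below is a minimal userspace sketch of that accumulate-then-flush ordering; it is illustrative only, not kernel code. The stub names (sketch_flush, free_page_stub) and the FREE_PTE_NR value are placeholders, while the real structure and entry points are in the include/asm-generic/tlb.h hunks that follow.

/*
 * Illustrative userspace sketch of the batching scheme (not kernel code).
 * The gather structure collects struct page pointers; the TLB is flushed
 * once, and only then are the batched pages freed.
 */
#include <stdio.h>

#define FREE_PTE_NR 16                  /* arbitrary batch size for the sketch */

struct page;                            /* opaque in this sketch */

typedef struct {
	unsigned long nr;               /* ~0UL means "fast mode": free immediately */
	struct page *pages[FREE_PTE_NR];
} gather_sketch;

static void flush_tlb_stub(void)            { puts("flush TLB on all CPUs"); }
static void free_page_stub(struct page *p)  { printf("free page %p\n", (void *)p); }

/* Mirrors tlb_flush_mmu(): flush first, then free the batched pages. */
static void sketch_flush(gather_sketch *tlb)
{
	unsigned long i, nr = tlb->nr;

	flush_tlb_stub();
	if (nr != ~0UL) {
		tlb->nr = 0;
		for (i = 0; i < nr; i++)
			free_page_stub(tlb->pages[i]);
	}
}

/* Mirrors tlb_remove_page(): queue a page, flushing when the batch fills. */
static void sketch_remove_page(gather_sketch *tlb, struct page *page)
{
	if (tlb->nr == ~0UL) {          /* fast mode */
		free_page_stub(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		sketch_flush(tlb);
}

int main(void)
{
	gather_sketch tlb = { .nr = 0 };

	sketch_remove_page(&tlb, (struct page *)0x1000);
	sketch_remove_page(&tlb, (struct page *)0x2000);
	sketch_flush(&tlb);             /* what tlb_finish_mmu() would trigger at the end */
	return 0;
}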
@@ -2153,11 +2153,6 @@ static void __init init_intel(struct cpuinfo_x86 *c)
 		strcpy(c->x86_model_id, p);
 
 #ifdef CONFIG_SMP
-	/* PGE CPUID bug: Pentium4 supports PGE, but seems to have SMP bugs.. */
-	if ( c->x86 == 15 )
-		clear_bit(X86_FEATURE_PGE, c->x86_capability);
 	if (test_bit(X86_FEATURE_HT, c->x86_capability)) {
 		extern int phys_proc_id[NR_CPUS];
......
@@ -28,8 +28,7 @@ typedef struct free_pte_ctx {
 	struct mm_struct *mm;
 	unsigned long nr;	/* set to ~0UL means fast mode */
 	unsigned long freed;
-	unsigned long start_addr, end_addr;
-	pte_t ptes[FREE_PTE_NR];
+	struct page * pages[FREE_PTE_NR];
 } mmu_gather_t;
 
 /* Users of the generic TLB shootdown code must declare this storage space. */
@@ -55,20 +54,15 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
 static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i, nr;
+	unsigned long nr;
 
-	/* Handle the fast case first. */
-	if (tlb->nr == ~0UL) {
-		flush_tlb_mm(tlb->mm);
-		return;
-	}
+	flush_tlb_mm(tlb->mm);
 	nr = tlb->nr;
-	tlb->nr = 0;
-	if (nr)
-		flush_tlb_mm(tlb->mm);
-	for (i=0; i < nr; i++) {
-		pte_t pte = tlb->ptes[i];
-		__free_pte(pte);
+	if (nr != ~0UL) {
+		unsigned long i;
+		tlb->nr = 0;
+		for (i=0; i < nr; i++)
+			free_page_and_swap_cache(tlb->pages[i]);
 	}
 }
@@ -85,7 +79,6 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsign
 	if (rss < freed)
 		freed = rss;
 	mm->rss = rss - freed;
-
 	tlb_flush_mmu(tlb, start, end);
 }
@@ -95,29 +88,16 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsign
  * handling the additional races in SMP caused by other CPUs caching valid
  * mappings in their TLBs.
  */
-static inline void tlb_remove_page(mmu_gather_t *tlb, pte_t *pte, unsigned long addr)
+static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
 {
-	struct page *page;
-	unsigned long pfn = pte_pfn(*pte);
-
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!PageReserved(page))
-			tlb->freed++;
-	}
-
 	/* Handle the common case fast, first. */\
 	if (tlb->nr == ~0UL) {
-		__free_pte(*pte);
-		pte_clear(pte);
+		free_page_and_swap_cache(page);
 		return;
 	}
-	if (!tlb->nr)
-		tlb->start_addr = addr;
-	tlb->ptes[tlb->nr++] = ptep_get_and_clear(pte);
-	tlb->end_addr = addr + PAGE_SIZE;
+	tlb->pages[tlb->nr++] = page;
 	if (tlb->nr >= FREE_PTE_NR)
-		tlb_finish_mmu(tlb, 0, 0);
+		tlb_flush_mmu(tlb, 0, 0);
 }
 
 #endif /* _ASM_GENERIC__TLB_H */
......
@@ -35,6 +35,9 @@ static inline void pte_free(struct page *pte)
 	__free_page(pte);
 }
 
+#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
@@ -43,6 +46,7 @@ static inline void pte_free(struct page *pte)
 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x) do { } while (0)
+#define pmd_free_tlb(tlb,x) do { } while (0)
 #define pgd_populate(mm, pmd, pte) BUG()
 #define check_pgt_cache() do { } while (0)
......
@@ -311,8 +311,6 @@ extern mem_map_t * mem_map;
 extern void show_free_areas(void);
 extern void show_free_areas_node(pg_data_t *pgdat);
-extern void clear_page_tables(struct mm_struct *, unsigned long, int);
 extern int fail_writepage(struct page *);
 struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
 struct file *shmem_file_setup(char * name, loff_t size);
......
@@ -71,29 +71,11 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
 mem_map_t * mem_map;
 
-/*
- * Called by TLB shootdown
- */
-void __free_pte(pte_t pte)
-{
-	struct page *page;
-	unsigned long pfn = pte_pfn(pte);
-
-	if (!pfn_valid(pfn))
-		return;
-	page = pfn_to_page(pfn);
-	if (PageReserved(page))
-		return;
-	if (pte_dirty(pte))
-		set_page_dirty(page);
-	free_page_and_swap_cache(page);
-}
-
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static inline void free_one_pmd(pmd_t * dir)
+static inline void free_one_pmd(mmu_gather_t *tlb, pmd_t * dir)
 {
 	struct page *pte;
@@ -106,10 +88,10 @@ static inline void free_one_pmd(pmd_t * dir)
 	}
 	pte = pmd_page(*dir);
 	pmd_clear(dir);
-	pte_free(pte);
+	pte_free_tlb(tlb, pte);
 }
 
-static inline void free_one_pgd(pgd_t * dir)
+static inline void free_one_pgd(mmu_gather_t *tlb, pgd_t * dir)
 {
 	int j;
 	pmd_t * pmd;
@@ -125,9 +107,9 @@ static inline void free_one_pgd(pgd_t * dir)
 	pgd_clear(dir);
 	for (j = 0; j < PTRS_PER_PMD ; j++) {
 		prefetchw(pmd+j+(PREFETCH_STRIDE/16));
-		free_one_pmd(pmd+j);
+		free_one_pmd(tlb, pmd+j);
 	}
-	pmd_free(pmd);
+	pmd_free_tlb(tlb, pmd);
 }
 
 /*
@@ -136,13 +118,13 @@ static inline void free_one_pgd(pgd_t * dir)
  *
 * Must be called with pagetable lock held.
 */
-void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
+void clear_page_tables(mmu_gather_t *tlb, unsigned long first, int nr)
 {
-	pgd_t * page_dir = mm->pgd;
+	pgd_t * page_dir = tlb->mm->pgd;
 
 	page_dir += first;
 	do {
-		free_one_pgd(page_dir);
+		free_one_pgd(tlb, page_dir);
 		page_dir++;
 	} while (--nr);
@@ -362,8 +344,18 @@ static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address,
 		if (pte_none(pte))
 			continue;
 		if (pte_present(pte)) {
-			/* This will eventually call __free_pte on the pte. */
-			tlb_remove_page(tlb, ptep, address + offset);
+			unsigned long pfn = pte_pfn(pte);
+
+			pte_clear(ptep);
+			pfn = pte_pfn(pte);
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+				if (!PageReserved(page)) {
+					if (pte_dirty(pte))
+						set_page_dirty(page);
+					tlb_remove_page(tlb, page);
+				}
+			}
 		} else {
 			free_swap_and_cache(pte_to_swp_entry(pte));
 			pte_clear(ptep);
......
@@ -20,6 +20,7 @@
 #include <asm/tlb.h>
 
 extern void unmap_page_range(mmu_gather_t *,struct vm_area_struct *vma, unsigned long address, unsigned long size);
+extern void clear_page_tables(mmu_gather_t *tlb, unsigned long first, int nr);
 
 /*
  * WARNING: the debugging will use recursive algorithms so never enable this
@@ -835,12 +836,13 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
  * "prev", if it exists, points to a vma before the one
  * we just free'd - but there's no telling how much before.
  */
-static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
+static void free_pgtables(mmu_gather_t *tlb, struct vm_area_struct *prev,
 	unsigned long start, unsigned long end)
 {
 	unsigned long first = start & PGDIR_MASK;
 	unsigned long last = end + PGDIR_SIZE - 1;
 	unsigned long start_index, end_index;
+	struct mm_struct *mm = tlb->mm;
 
 	if (!prev) {
 		prev = mm->mmap;
@@ -875,7 +877,7 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
 	start_index = pgd_index(first);
 	end_index = pgd_index(last);
 	if (end_index > start_index) {
-		clear_page_tables(mm, start_index, end_index - start_index);
+		clear_page_tables(tlb, start_index, end_index - start_index);
 		flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
 	}
 }
@@ -974,7 +976,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 	if (extra)
 		kmem_cache_free(vm_area_cachep, extra);
-	free_pgtables(mm, prev, addr, addr+len);
+	free_pgtables(tlb, prev, addr, addr+len);
 	tlb_finish_mmu(tlb, addr, addr+len);
 	spin_unlock(&mm->page_table_lock);
@@ -1130,7 +1132,7 @@ void exit_mmap(struct mm_struct * mm)
 	if (mm->map_count)
 		BUG();
-	clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
+	clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
 	tlb_finish_mmu(tlb, FIRST_USER_PGD_NR*PGDIR_SIZE, USER_PTRS_PER_PGD*PGDIR_SIZE);
 	spin_unlock(&mm->page_table_lock);
 }
......