Commit e6d19c6a authored by Linus Torvalds, committed by Linus Torvalds

Make generic TLB shootdown friendlier to non-x86 architectures

parent 00f42361
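As a reading aid for the diff below: <asm-generic/tlb.h> stops hard-coding x86 behaviour and instead consumes a small set of hook macros that each architecture defines before including it. This commit drops the stub definitions from the generic header, supplies x86's (mostly no-op) versions in include/asm-i386/tlb.h, and calls the new per-PTE hook from zap_pte_range(). The hooks, with hedged one-line descriptions of their intent:

tlb_start_vma(tlb, vma)                 /* per-vma begin hook (a no-op on x86) */
tlb_end_vma(tlb, vma)                   /* per-vma end hook (a no-op on x86) */
tlb_remove_tlb_entry(tlb, pte, address) /* record a cleared PTE and its virtual address */
tlb_flush(tlb)                          /* flush the gathered mm (flush_tlb_mm() on x86) */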
include/asm-generic/tlb.h

@@ -16,8 +16,17 @@
 #include <linux/config.h>
 #include <asm/tlbflush.h>
-/* aim for something that fits in the L1 cache */
-#define FREE_PTE_NR 508
+/*
+ * For UP we don't need to worry about TLB flush
+ * and page free order so much..
+ */
+#ifdef CONFIG_SMP
+#define FREE_PTE_NR 507
+#define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
+#else
+#define FREE_PTE_NR 1
+#define tlb_fast_mode(tlb) 1
+#endif
 /* mmu_gather_t is an opaque type used by the mm code for passing around any
  * data needed by arch specific code for tlb_remove_page. This structure can
@@ -34,10 +43,6 @@ typedef struct free_pte_ctx {
 /* Users of the generic TLB shootdown code must declare this storage space. */
 extern mmu_gather_t mmu_gathers[NR_CPUS];
-/* Do me later */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
 /* tlb_gather_mmu
  * Return a pointer to an initialized mmu_gather_t.
  */
@@ -57,9 +62,9 @@ static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
 {
 	unsigned long nr;
-	flush_tlb_mm(tlb->mm);
+	tlb_flush(tlb);
 	nr = tlb->nr;
-	if (nr != ~0UL) {
+	if (!tlb_fast_mode(tlb)) {
 		unsigned long i;
 		tlb->nr = 0;
 		for (i=0; i < nr; i++)
@@ -91,8 +96,7 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
  */
 static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
 {
-	/* Handle the common case fast, first. */
-	if (tlb->nr == ~0UL) {
+	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
 		return;
 	}
......
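The net effect on the generic code: tlb_fast_mode() replaces the open-coded nr == ~0UL test, and on UP it is the constant 1, so pages are always freed immediately and only a one-entry gather (FREE_PTE_NR of 1) is kept. A hedged sketch of the resulting tlb_remove_page() paths; the batching arm and the pages[] field name are reconstructed from context rather than shown in the hunks above:

static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {		/* always true on UP; nr == ~0UL on SMP */
		free_page_and_swap_cache(page);	/* free right away, nothing to batch */
		return;
	}
	tlb->pages[tlb->nr++] = page;		/* field name assumed: queue the page ... */
	if (tlb->nr >= FREE_PTE_NR)		/* ... and flush once the gather is full */
		tlb_flush_mmu(tlb, 0, 0);
}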
include/asm-i386/tlb.h

+#ifndef _I386_TLB_H
+#define _I386_TLB_H
+/*
+ * x86 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+/*
+ * .. because we flush the whole mm when it
+ * fills up.
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
 #include <asm-generic/tlb.h>
+#endif
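For contrast, a purely hypothetical sketch of how a non-x86 port could use the same hooks once this interface exists; arch_flush_tlb_range() is an invented placeholder for illustration, not an API introduced by this commit:

/* Hypothetical: an architecture where flushing a virtual address range is
 * cheaper than flushing the whole mm could hook the per-vma boundary
 * instead of defining it away. */
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) \
	arch_flush_tlb_range((vma)->vm_mm, (vma)->vm_start, (vma)->vm_end)
#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>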
mm/memory.c

@@ -348,11 +348,13 @@ static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
 			pte_clear(ptep);
 			pfn = pte_pfn(pte);
+			tlb_remove_tlb_entry(tlb, pte, address+offset);
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
 				if (!PageReserved(page)) {
 					if (pte_dirty(pte))
 						set_page_dirty(page);
+					tlb->freed++;
 					tlb_remove_page(tlb, page);
 				}
 			}
......
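Read without the diff markers, the changed inner loop of zap_pte_range() comes out as below (indentation and the surrounding loop assumed from context). The new tlb_remove_tlb_entry() call hands each cleared PTE and its virtual address to the architecture hook (a no-op on x86), and tlb->freed counts the pages passed to the gather:

	pte_clear(ptep);
	pfn = pte_pfn(pte);
	tlb_remove_tlb_entry(tlb, pte, address + offset);	/* per-PTE arch hook */
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page)) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			tlb->freed++;			/* count one more unmapped page */
			tlb_remove_page(tlb, page);	/* queue (or immediately free) the page */
		}
	}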