Commit 0c17b328 authored by Andrew Morton, committed by Richard Henderson

[PATCH] replace `typedef mmu_gather_t' with `struct mmu_gather'

In the next patch I wish to add to mm.h prototypes of functions which take an
mmu_gather_t* argument.  To do this I must do one of the following:

a) include tlb.h in mm.h

   Not good - more nested includes when a simple forward decl is sufficient.

b) Add `typedef struct free_pte_ctx mmu_gather_t;' to mm.h.

   That's silly - it's supposed to be an opaque type.

   or

c) Remove the pesky typedef.

   Bingo.
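
   As a sketch of what (c) buys us (illustrative only - clear_page_tables
   is one of the prototypes the follow-up patch moves into mm.h):

      /* mm.h need not include tlb.h: an incomplete type is enough
       * for pointer arguments.
       */
      struct mmu_gather;

      void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr);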
parent ab706391
@@ -34,7 +34,7 @@
 #include <asm/console.h>
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 extern void die_if_kernel(char *,struct pt_regs *,long);
......
@@ -47,7 +47,7 @@
 #define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(pte_t))
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern char _stext, _text, _etext, _end, __init_begin, __init_end;
......
@@ -41,7 +41,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 unsigned long highstart_pfn, highend_pfn;
 /*
......
@@ -29,7 +29,7 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 /* References to section boundaries: */
 extern char _stext, _etext, _edata, __init_begin, __init_end;
......
@@ -33,7 +33,7 @@
 #endif
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 /*
  * ZERO_PAGE is a special page that is used for zero-initialized
......
@@ -42,7 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 extern void prom_free_prom_memory(void);
......
@@ -37,7 +37,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 void pgd_init(unsigned long page)
 {
......
@@ -23,7 +23,7 @@
 #include <asm/tlb.h>
 #include <asm/pdc_chassis.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 extern char _text; /* start of kernel code, defined by linker */
 extern int data_start;
......
@@ -55,7 +55,7 @@
 #endif
 #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 unsigned long total_memory;
 unsigned long total_lowmem;
......
@@ -50,7 +50,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
  * Called at the end of a mmu_gather operation to make sure the
  * TLB flush is completely done.
  */
-void tlb_flush(mmu_gather_t *tlb)
+void tlb_flush(struct mmu_gather *tlb)
 {
        if (Hash == 0) {
                /*
......
@@ -95,7 +95,7 @@ unsigned long __max_memory;
 /* This is declared as we are using the more or less generic
  * include/asm-ppc64/tlb.h file -- tgall
  */
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 void show_mem(void)
 {
......
@@ -38,7 +38,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
......
@@ -38,7 +38,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
......
@@ -36,7 +36,7 @@
 #include <asm/io.h>
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 /*
  * Cache of MMU context last used.
......
@@ -34,7 +34,7 @@
 #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
 #include <asm/tlb.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 unsigned long *sparc_valid_addr_bitmap;
......
@@ -36,7 +36,7 @@
 #include <asm/tlb.h>
 #include <asm/spitfire.h>
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 extern void device_scan(void);
......
@@ -44,7 +44,7 @@ extern char __init_begin, __init_end;
 extern long physmem_size;
 /* Not changed by UML */
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 /* Changed during early boot */
 int kmalloc_ok = 0;
......
@@ -40,7 +40,7 @@
 unsigned long start_pfn, end_pfn;
-mmu_gather_t mmu_gathers[NR_CPUS];
+struct mmu_gather mmu_gathers[NR_CPUS];
 /*
  * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
......
@@ -23,20 +23,21 @@
  * TLB handling. This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
  */
-typedef struct free_pte_ctx {
+struct mmu_gather {
        struct mm_struct *mm;
        unsigned int freed;
        unsigned int flushes;
        unsigned int avoided_flushes;
-} mmu_gather_t;
+};
-extern mmu_gather_t mmu_gathers[NR_CPUS];
+extern struct mmu_gather mmu_gathers[NR_CPUS];
-static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
        int cpu = smp_processor_id();
-       mmu_gather_t *tlb = &mmu_gathers[cpu];
+       struct mmu_gather *tlb = &mmu_gathers[cpu];
        tlb->mm = mm;
        tlb->freed = 0;
@@ -44,7 +45,8 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int fu
        return tlb;
 }
-static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
        struct mm_struct *mm = tlb->mm;
        unsigned long freed = tlb->freed;
......
@@ -28,29 +28,30 @@
 #define tlb_fast_mode(tlb) 1
 #endif
-/* mmu_gather_t is an opaque type used by the mm code for passing around any
- * data needed by arch specific code for tlb_remove_page. This structure can
- * be per-CPU or per-MM as the page table lock is held for the duration of TLB
- * shootdown.
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page. This structure
+ * can be per-CPU or per-MM as the page table lock is held for the duration of
+ * TLB shootdown.
  */
-typedef struct free_pte_ctx {
+struct mmu_gather {
        struct mm_struct *mm;
        unsigned int nr; /* set to ~0U means fast mode */
        unsigned int need_flush;/* Really unmapped some ptes? */
        unsigned int fullmm; /* non-zero means full mm flush */
        unsigned long freed;
        struct page * pages[FREE_PTE_NR];
-} mmu_gather_t;
+};
 /* Users of the generic TLB shootdown code must declare this storage space. */
-extern mmu_gather_t mmu_gathers[NR_CPUS];
+extern struct mmu_gather mmu_gathers[NR_CPUS];
 /* tlb_gather_mmu
- * Return a pointer to an initialized mmu_gather_t.
+ * Return a pointer to an initialized struct mmu_gather.
  */
-static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-       mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
+       struct mmu_gather *tlb = &mmu_gathers[smp_processor_id()];
        tlb->mm = mm;
@@ -63,7 +64,8 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int fu
        return tlb;
 }
-static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
        if (!tlb->need_flush)
                return;
@@ -79,7 +81,8 @@ static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigne
  * Called at the end of the shootdown operation to free up any resources
  * that were required. The page table lock is still held at this point.
  */
-static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
        int freed = tlb->freed;
        struct mm_struct *mm = tlb->mm;
@@ -95,12 +98,12 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsign
 }
-/* void tlb_remove_page(mmu_gather_t *tlb, pte_t *ptep, unsigned long addr)
+/* void tlb_remove_page(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
  * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
  * handling the additional races in SMP caused by other CPUs caching valid
  * mappings in their TLBs.
  */
-static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
        tlb->need_flush = 1;
        if (tlb_fast_mode(tlb)) {
......
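(A note on the fast-mode convention above: nr == ~0U means pages are freed
immediately rather than batched. The tlb_remove_page body is truncated in
this view; a sketch of how it presumably continues in the era's generic
implementation - reconstructed, not part of this patch's visible hunks:

        static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
        {
                tlb->need_flush = 1;
                if (tlb_fast_mode(tlb)) {
                        /* fast mode: no batching, free the page right away */
                        free_page_and_swap_cache(page);
                        return;
                }
                /* batch the page; flush once the gather array fills up */
                tlb->pages[tlb->nr++] = page;
                if (tlb->nr >= FREE_PTE_NR)
                        tlb_flush_mmu(tlb, ~0UL, 0UL);
        }

All identifiers used here appear in the hunk above.)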
@@ -51,7 +51,7 @@
 # define tlb_fast_mode(tlb) (1)
 #endif
-typedef struct {
+struct mmu_gather {
        struct mm_struct *mm;
        unsigned int nr; /* == ~0U => fast mode */
        unsigned char fullmm; /* non-zero means full mm flush */
@@ -60,17 +60,18 @@ typedef struct {
        unsigned long start_addr;
        unsigned long end_addr;
        struct page *pages[FREE_PTE_NR];
-} mmu_gather_t;
+};
 /* Users of the generic TLB shootdown code must declare this storage space. */
-extern mmu_gather_t mmu_gathers[NR_CPUS];
+extern struct mmu_gather mmu_gathers[NR_CPUS];
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
  * freed pages that where gathered up to this point.
  */
 static inline void
-ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu(struct mmu_gather *tlb,
+                   unsigned long start, unsigned long end)
 {
        unsigned int nr;
@@ -120,12 +121,12 @@ ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
 }
 /*
- * Return a pointer to an initialized mmu_gather_t.
+ * Return a pointer to an initialized struct mmu_gather.
  */
-static inline mmu_gather_t *
+static inline struct mmu_gather *
 tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 {
-       mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
+       struct mmu_gather *tlb = &mmu_gathers[smp_processor_id()];
        tlb->mm = mm;
        /*
@@ -153,7 +154,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
  * collected. The page table lock is still held at this point.
  */
 static inline void
-tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
        unsigned long freed = tlb->freed;
        struct mm_struct *mm = tlb->mm;
@@ -178,7 +179,7 @@ tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
  * this file).
  */
 static inline void
-tlb_remove_page (mmu_gather_t *tlb, struct page *page)
+tlb_remove_page (struct mmu_gather *tlb, struct page *page)
 {
        tlb->need_flush = 1;
@@ -196,7 +197,8 @@ tlb_remove_page (mmu_gather_t *tlb, struct page *page)
  * PTE, not just those pointing to (normal) physical memory.
  */
 static inline void
-__tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
+__tlb_remove_tlb_entry(struct mmu_gather *tlb,
+                       pte_t *ptep, unsigned long address)
 {
        if (tlb->start_addr == ~0UL)
                tlb->start_addr = address;
......
@@ -55,7 +55,7 @@ static inline void pte_free(struct page *page)
        __free_page(page);
 }
-static inline void __pte_free_tlb(mmu_gather_t *tlb, struct page *page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
 {
        cache_page(kmap(page));
        kunmap(page);
@@ -73,7 +73,7 @@ static inline int pmd_free(pmd_t *pmd)
        return free_pointer_table(pmd);
 }
-static inline int __pmd_free_tlb(mmu_gather_t *tlb, pmd_t *pmd)
+static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
        return free_pointer_table(pmd);
 }
......
@@ -31,7 +31,7 @@ static inline void pte_free(struct page *page)
        __free_page(page);
 }
-static inline void __pte_free_tlb(mmu_gather_t *tlb, struct page *page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
 {
        tlb_remove_page(tlb, page);
 }
......
@@ -34,7 +34,7 @@ extern void tlb_flush(struct free_pte_ctx *tlb);
 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);
-static inline void __tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
                                          unsigned long address)
 {
        if (pte_val(*ptep) & _PAGE_HASHPTE)
......
@@ -43,7 +43,7 @@ struct ppc64_tlb_batch {
 extern struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
-static inline void __tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
                                          unsigned long address)
 {
        int cpu = smp_processor_id();
......
@@ -82,7 +82,7 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static inline void free_one_pmd(mmu_gather_t *tlb, pmd_t * dir)
+static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
 {
        struct page *page;
@@ -99,7 +99,7 @@ static inline void free_one_pmd(mmu_gather_t *tlb, pmd_t * dir)
        pte_free_tlb(tlb, page);
 }
-static inline void free_one_pgd(mmu_gather_t *tlb, pgd_t * dir)
+static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
 {
        int j;
        pmd_t * pmd;
@@ -124,7 +124,7 @@ static inline void free_one_pgd(mmu_gather_t *tlb, pgd_t * dir)
  *
  * Must be called with pagetable lock held.
  */
-void clear_page_tables(mmu_gather_t *tlb, unsigned long first, int nr)
+void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
 {
        pgd_t * page_dir = tlb->mm->pgd;
@@ -369,7 +369,8 @@ skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
 }
 static void
-zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
+zap_pte_range(struct mmu_gather *tlb, pmd_t * pmd,
+              unsigned long address, unsigned long size)
 {
        unsigned long offset;
        pte_t *ptep;
@@ -416,7 +417,9 @@ zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned lo
        pte_unmap(ptep-1);
 }
-static void zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
+static void
+zap_pmd_range(struct mmu_gather *tlb, pgd_t * dir,
+              unsigned long address, unsigned long size)
 {
        pmd_t * pmd;
        unsigned long end;
@@ -439,7 +442,8 @@ static void zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address,
        } while (address < end);
 }
-void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned long address, unsigned long end)
+void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+                      unsigned long address, unsigned long end)
 {
        pgd_t * dir;
@@ -462,7 +466,7 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo
        tlb_end_vma(tlb, vma);
 }
-/* Dispose of an entire mmu_gather_t per rescheduling point */
+/* Dispose of an entire struct mmu_gather per rescheduling point */
 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
 #define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE)
 #endif
@@ -486,7 +490,7 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo
 void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size)
 {
        struct mm_struct *mm = vma->vm_mm;
-       mmu_gather_t *tlb;
+       struct mmu_gather *tlb;
        unsigned long end, block;
        might_sleep();
......
@@ -23,8 +23,8 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
-extern void unmap_page_range(mmu_gather_t *,struct vm_area_struct *vma, unsigned long address, unsigned long size);
-extern void clear_page_tables(mmu_gather_t *tlb, unsigned long first, int nr);
+extern void unmap_page_range(struct mmu_gather *,struct vm_area_struct *vma, unsigned long address, unsigned long size);
+extern void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr);
 /*
  * WARNING: the debugging will use recursive algorithms so never enable this
@@ -900,7 +900,7 @@ struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long add
  * "prev", if it exists, points to a vma before the one
  * we just free'd - but there's no telling how much before.
  */
-static void free_pgtables(mmu_gather_t *tlb, struct vm_area_struct *prev,
+static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
        unsigned long start, unsigned long end)
 {
        unsigned long first = start & PGDIR_MASK;
@@ -1008,7 +1008,7 @@ static void unmap_region(struct mm_struct *mm,
        unsigned long start,
        unsigned long end)
 {
-       mmu_gather_t *tlb;
+       struct mmu_gather *tlb;
        tlb = tlb_gather_mmu(mm, 0);
@@ -1277,7 +1277,7 @@ void build_mmap_rb(struct mm_struct * mm)
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct * mm)
 {
-       mmu_gather_t *tlb;
+       struct mmu_gather *tlb;
        struct vm_area_struct * mpnt;
        profile_exit_mmap(mm);
......
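For reference, a sketch of how callers drive this API after the rename - the
variables mm, vma, start, end stand in for a caller's context, and the
explicit locking is an assumption based on the comments above, which note
that the page table lock is held for the duration of the shootdown:

        struct mmu_gather *tlb;

        spin_lock(&mm->page_table_lock);
        tlb = tlb_gather_mmu(mm, 0);             /* 0: not a full-mm flush */
        unmap_page_range(tlb, vma, start, end);  /* gather pages, batch TLB work */
        tlb_finish_mmu(tlb, start, end);         /* flush TLB, free gathered pages */
        spin_unlock(&mm->page_table_lock);

This mirrors the unmap_region and exit_mmap changes above.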