Commit d6dd61c8 authored by Jeremy Fitzhardinge, committed by Andi Kleen

[PATCH] x86: PARAVIRT: add hooks to intercept mm creation and destruction

Add hooks to allow a paravirt implementation to track the lifetime of
an mm.  Paravirtualization requires three hooks, but only two are
needed in common code.  They are:

arch_dup_mmap, which is called when a new mmap is created at fork

arch_exit_mmap, which is called when the last process reference to an
  mm is dropped, which typically happens on exit and exec.

The third hook is activate_mm, which is called from the arch-specific
activate_mm() macro/function, and so doesn't need stub versions for
other architectures.  It's called when an mm is first used.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: linux-arch@vger.kernel.org
Cc: James Bottomley <James.Bottomley@SteelEye.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent 5311ab62
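For illustration only (not part of this patch): a paravirt backend that wants these notifications would override the three nops from its setup code, roughly as in the sketch below. The example_* functions and example_setup_mm_hooks() are hypothetical names; only the paravirt_ops fields (.activate_mm, .dup_mmap, .exit_mmap) and their signatures come from the patch, and the sketch assumes a CONFIG_PARAVIRT i386 build of this era.

#include <linux/init.h>
#include <linux/sched.h>        /* struct mm_struct */
#include <asm/paravirt.h>

/* Hypothetical backend hooks -- names invented for illustration. */
static void example_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        /* e.g. tell the hypervisor which mm is now active on this CPU */
}

static void example_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        /* e.g. register the freshly forked mm with the hypervisor */
}

static void example_exit_mmap(struct mm_struct *mm)
{
        /* e.g. drop any hypervisor state associated with this mm */
}

/* Hypothetical early-setup code for the backend: replace the nops
 * installed by this patch with real implementations. */
static void __init example_setup_mm_hooks(void)
{
        paravirt_ops.activate_mm = example_activate_mm;
        paravirt_ops.dup_mmap = example_dup_mmap;
        paravirt_ops.exit_mmap = example_exit_mmap;
}

On a native or non-CONFIG_PARAVIRT build the hooks simply stay as the no-ops shown in the diff below.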
@@ -237,6 +237,10 @@ struct paravirt_ops paravirt_ops = {
        .irq_enable_sysexit = native_irq_enable_sysexit,
        .iret = native_iret,
+       .dup_mmap = paravirt_nop,
+       .exit_mmap = paravirt_nop,
+       .activate_mm = paravirt_nop,
        .startup_ipi_hook = paravirt_nop,
};
...
@@ -10,6 +10,7 @@
#include <asm/system.h>
#include <asm/machvec.h>
#include <asm/compiler.h>
+#include <asm-generic/mm_hooks.h>
/*
 * Force a context reload. This is needed when we change the page
...
@@ -16,6 +16,7 @@
#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
void __check_kvm_seq(struct mm_struct *mm);
...
@@ -13,6 +13,8 @@
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H
+#include <asm-generic/mm_hooks.h>
#define init_new_context(tsk,mm) 0
#define destroy_context(mm) do { } while(0)
...
@@ -15,6 +15,7 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sysreg.h>
+#include <asm-generic/mm_hooks.h>
/*
 * The MMU "context" consists of two things:
...
#ifndef __CRIS_MMU_CONTEXT_H
#define __CRIS_MMU_CONTEXT_H
+#include <asm-generic/mm_hooks.h>
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void get_mmu_context(struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
...
@@ -15,6 +15,7 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
...
new file: include/asm-generic/mm_hooks.h

/*
 * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
 * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
 * need to hook these.
 */
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif  /* _ASM_GENERIC_MM_HOOKS_H */
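As an aside (not part of the patch): an architecture that did want to act on these events would provide its own inline definitions in its asm/mmu_context.h instead of pulling in the generic header. A rough sketch for a hypothetical arch "foo", with invented foo_hv_* helpers:

/* In a hypothetical asm-foo/mmu_context.h -- do NOT include
 * <asm-generic/mm_hooks.h>; define the hooks instead. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        foo_hv_register_mm(mm);         /* invented hypervisor call */
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        foo_hv_unregister_mm(mm);       /* invented hypervisor call */
}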
@@ -4,6 +4,7 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
...
@@ -5,6 +5,16 @@
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/paravirt.h>
+#ifndef CONFIG_PARAVIRT
+#include <asm-generic/mm_hooks.h>
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+                                        struct mm_struct *next)
+{
+}
+#endif  /* !CONFIG_PARAVIRT */
/*
 * Used for LDT copy/destruction.
@@ -66,6 +76,9 @@ static inline void switch_mm(struct mm_struct *prev,
        asm("movl %0,%%gs": :"r" (0));
#define activate_mm(prev, next) \
-       switch_mm((prev),(next),NULL)
+       do { \
+               paravirt_activate_mm(prev, next); \
+               switch_mm((prev),(next),NULL); \
+       } while(0);
#endif
@@ -119,6 +119,12 @@ struct paravirt_ops
        void (*io_delay)(void);
+       void (*activate_mm)(struct mm_struct *prev,
+                           struct mm_struct *next);
+       void (*dup_mmap)(struct mm_struct *oldmm,
+                        struct mm_struct *mm);
+       void (*exit_mmap)(struct mm_struct *mm);
#ifdef CONFIG_X86_LOCAL_APIC
        void (*apic_write)(unsigned long reg, unsigned long v);
        void (*apic_write_atomic)(unsigned long reg, unsigned long v);
@@ -395,6 +401,23 @@ static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
}
#endif
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+                                        struct mm_struct *next)
+{
+       paravirt_ops.activate_mm(prev, next);
+}
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+                                 struct mm_struct *mm)
+{
+       paravirt_ops.dup_mmap(oldmm, mm);
+}
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+       paravirt_ops.exit_mmap(mm);
+}
#define __flush_tlb() paravirt_ops.flush_tlb_user()
#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
...
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <asm/processor.h>
+#include <asm-generic/mm_hooks.h>
struct ia64_ctx {
        spinlock_t lock;
...
@@ -15,6 +15,7 @@
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
/*
 * Cache of MMU context last used.
...
#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
...
@@ -4,6 +4,7 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
...
@@ -20,6 +20,7 @@
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
+#include <asm-generic/mm_hooks.h>
/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
...
@@ -5,6 +5,7 @@
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
...
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
+#include <asm-generic/mm_hooks.h>
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
...
@@ -6,6 +6,7 @@
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
+#include <asm-generic/mm_hooks.h>
/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
...
@@ -10,6 +10,8 @@
#define __S390_MMU_CONTEXT_H
#include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
/*
 * get a new mmu context.. S390 don't know about contexts.
 */
...
@@ -12,6 +12,7 @@
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <asm-generic/mm_hooks.h>
/*
 * The MMU "context" consists of two things:
...
@@ -27,7 +27,7 @@
extern unsigned long mmu_context_cache;
#include <asm/page.h>
+#include <asm-generic/mm_hooks.h>
/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;
...
@@ -5,6 +5,8 @@
#ifndef __ASSEMBLY__
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
...
@@ -9,6 +9,7 @@
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>
+#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
...
@@ -6,6 +6,8 @@
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H
+#include <asm-generic/mm_hooks.h>
#include "linux/sched.h"
#include "choose-mode.h"
#include "um_mmu.h"
...
#ifndef __V850_MMU_CONTEXT_H__
#define __V850_MMU_CONTEXT_H__
+#include <asm-generic/mm_hooks.h>
#define destroy_context(mm) ((void)0)
#define init_new_context(tsk,mm) 0
#define switch_mm(prev,next,tsk) ((void)0)
...
@@ -7,6 +7,7 @@
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
/*
 * possibly do the LDT unload here?
...
@@ -18,6 +18,7 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
#define XCHAL_MMU_ASID_BITS 8
...
@@ -286,6 +286,8 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (retval)
                        goto out;
        }
+       /* a new mm has just been created */
+       arch_dup_mmap(oldmm, mm);
        retval = 0;
out:
        up_write(&mm->mmap_sem);
...
@@ -29,6 +29,7 @@
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
+#include <asm/mmu_context.h>
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
@@ -1979,6 +1980,9 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long nr_accounted = 0;
        unsigned long end;
+       /* mm's last user has gone, and its about to be pulled down */
+       arch_exit_mmap(mm);
        lru_add_drain();
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
...
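To summarize the control flow the patch establishes, here is a small self-contained userspace model (plain C, compilable with any C compiler). Every demo_* name and the simplified struct mm_struct are invented for this illustration; only the shape of the dispatch -- generic code calling arch_dup_mmap()/arch_exit_mmap(), which either do nothing or forward to a backend's function pointers -- reflects the patch.

#include <stdio.h>

/* Stand-ins for the kernel's mm_struct and paravirt_ops, invented for
 * this demo only. */
struct mm_struct { int id; };

struct demo_paravirt_ops {
        void (*activate_mm)(struct mm_struct *prev, struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm, struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);
};

/* Default no-op hooks, analogous to paravirt_nop / asm-generic/mm_hooks.h. */
static void demo_nop_activate(struct mm_struct *prev, struct mm_struct *next) {}
static void demo_nop_dup(struct mm_struct *oldmm, struct mm_struct *mm) {}
static void demo_nop_exit(struct mm_struct *mm) {}

static struct demo_paravirt_ops demo_ops = {
        .activate_mm = demo_nop_activate,
        .dup_mmap = demo_nop_dup,
        .exit_mmap = demo_nop_exit,
};

/* The wrappers generic code calls, mirroring arch_dup_mmap() in fork.c
 * and arch_exit_mmap() in mmap.c. */
static void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        demo_ops.dup_mmap(oldmm, mm);
}

static void arch_exit_mmap(struct mm_struct *mm)
{
        demo_ops.exit_mmap(mm);
}

/* A hypothetical backend hook. */
static void demo_hv_exit(struct mm_struct *mm)
{
        printf("hypervisor notified: mm %d is being torn down\n", mm->id);
}

int main(void)
{
        struct mm_struct parent = { 1 }, child = { 2 };

        arch_dup_mmap(&parent, &child);         /* fork path: no-op by default */

        demo_ops.exit_mmap = demo_hv_exit;      /* a backend installs its hook */
        arch_exit_mmap(&child);                 /* exit path now reaches the backend */
        return 0;
}

Running the model prints the hypervisor message only for the exit path, matching the default-to-nop behaviour the patch installs.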