Commit 8a6879c6 authored by Rusty Russell, committed by Steve French

[PATCH] switch_mm and enter_lazy_tlb: remove cpu arg

switch_mm and enter_lazy_tlb take a CPU argument which is always
smp_processor_id().  This is misleading, and pointless when they use
per-cpu variables or other optimizations; gcc eliminates redundant
smp_processor_id() calls in inline functions anyway.

This patch removes that argument from all the architectures.
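
The shape of the change is the same everywhere: the cpu parameter is
dropped from the signature, and implementations that actually need the
CPU number fetch it locally; implementations that never used it simply
lose the parameter.  A simplified before/after sketch, condensed from
the x86-64 hunk below:

	/* Before: every caller had to pass cpu == smp_processor_id() */
	static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
				     struct task_struct *tsk, unsigned cpu);

	/* After: the callee derives the CPU itself when it needs it */
	static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
				     struct task_struct *tsk)
	{
		unsigned cpu = smp_processor_id();

		if (likely(prev != next)) {
			/* stop flush IPIs for the previous mm */
			clear_bit(cpu, &prev->cpu_vm_mask);
			/* ... switch page tables as before ... */
		}
	}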
parent b993be7e
@@ -283,7 +283,7 @@ get_mmu_context(struct mm_struct *mm)
 void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk, int cpu)
+	  struct task_struct *tsk)
 {
 	/* make sure we have a context */
@@ -495,7 +495,7 @@ void __init cpu_init (void)
 	current->active_mm = &init_mm;
 	if (current->mm)
 		BUG();
-	enter_lazy_tlb(&init_mm, current, cpu);
+	enter_lazy_tlb(&init_mm, current);

 	load_esp0(t, thread->esp0);
 	set_tss_desc(cpu,t);
@@ -456,7 +456,7 @@ smp_cpu_init(int cpunum)
 	current->active_mm = &init_mm;
 	if(current->mm)
 		BUG();
-	enter_lazy_tlb(&init_mm, current, cpunum);
+	enter_lazy_tlb(&init_mm, current);

 	init_IRQ();	/* make sure no IRQ's are enabled or pending */
 }
@@ -107,7 +107,7 @@ void __devinit cpu_init (void)
 	current->active_mm = &init_mm;
 	if (current->mm)
 		BUG();
-	enter_lazy_tlb(&init_mm, current, nr);
+	enter_lazy_tlb(&init_mm, current);
 }

 /*
@@ -113,8 +113,9 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 }

 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	       struct task_struct *tsk, unsigned cpu)
+	       struct task_struct *tsk)
 {
+	unsigned cpu = smp_processor_id();
 	if (prev != next)
 		clear_bit(cpu, &prev->cpu_vm_mask);
 	set_bit(cpu, &next->cpu_vm_mask);
@@ -288,7 +288,7 @@ void __init cpu_init (void)
 	me->active_mm = &init_mm;
 	if (me->mm)
 		BUG();
-	enter_lazy_tlb(&init_mm, me, cpu);
+	enter_lazy_tlb(&init_mm, me);

 	set_tss_desc(cpu, t);
 	load_TR_desc();
@@ -552,7 +552,7 @@ static void unuse_mm(struct mm_struct *mm)
 {
 	current->mm = NULL;
 	/* active_mm is still 'mm' */
-	enter_lazy_tlb(mm, current, smp_processor_id());
+	enter_lazy_tlb(mm, current);
 }

 /* Run on kevent's context.  FIXME: needs to be per-cpu and warn if an
@@ -68,7 +68,7 @@ struct alpha_machine_vector
 	int (*mv_is_ioaddr)(unsigned long);

 	void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
-			     struct task_struct *, long);
+			     struct task_struct *);
 	void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);

 	void (*mv_flush_tlb_current)(struct mm_struct *);
@@ -130,11 +130,12 @@ __get_new_mm_context(struct mm_struct *mm, long cpu)
 __EXTERN_INLINE void
 ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
-	      struct task_struct *next, long cpu)
+	      struct task_struct *next)
 {
 	/* Check if our ASN is of an older version, and thus invalid. */
 	unsigned long asn;
 	unsigned long mmc;
+	long cpu = smp_processor_id();

 #ifdef CONFIG_SMP
 	cpu_data[cpu].asn_lock = 1;
@@ -159,7 +160,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,

 __EXTERN_INLINE void
 ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
-	      struct task_struct *next, long cpu)
+	      struct task_struct *next)
 {
 	/* As described, ASN's are broken for TLB usage.  But we can
 	   optimize for switching between threads -- if the mm is
@@ -174,7 +175,7 @@ ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
 	/* Do continue to allocate ASNs, because we can still use them
 	   to avoid flushing the icache.  */
-	ev5_switch_mm(prev_mm, next_mm, next, cpu);
+	ev5_switch_mm(prev_mm, next_mm, next);
 }

 extern void __load_new_mm_context(struct mm_struct *);
@@ -212,14 +213,14 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
 #define deactivate_mm(tsk,mm)	do { } while (0)

 #ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c,d)	alpha_mv.mv_switch_mm((a),(b),(c),(d))
+# define switch_mm(a,b,c)	alpha_mv.mv_switch_mm((a),(b),(c))
 # define activate_mm(x,y)	alpha_mv.mv_activate_mm((x),(y))
 #else
 # ifdef CONFIG_ALPHA_EV4
-#  define switch_mm(a,b,c,d)	ev4_switch_mm((a),(b),(c),(d))
+#  define switch_mm(a,b,c)	ev4_switch_mm((a),(b),(c))
 #  define activate_mm(x,y)	ev4_activate_mm((x),(y))
 # else
-#  define switch_mm(a,b,c,d)	ev5_switch_mm((a),(b),(c),(d))
+#  define switch_mm(a,b,c)	ev5_switch_mm((a),(b),(c))
 #  define activate_mm(x,y)	ev5_activate_mm((x),(y))
 # endif
 #endif
@@ -245,7 +246,7 @@ destroy_context(struct mm_struct *mm)
 }

 static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 	tsk->thread_info->pcb.ptbr
 	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
@@ -28,7 +28,7 @@
  *	tsk->mm will be NULL
  */
 static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -40,7 +40,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
  */
 static inline void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk, unsigned int cpu)
+	  struct task_struct *tsk)
 {
 	if (prev != next) {
 		cpu_switch_mm(next->pgd, next);
@@ -26,7 +26,7 @@
  *	tsk->mm will be NULL
  */
 static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -36,7 +36,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
  */
 static inline void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk, unsigned int cpu)
+	  struct task_struct *tsk)
 {
 	cpu_switch_mm(next->pgd, next);
 }
@@ -5,11 +5,11 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void get_mmu_context(struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-		      struct task_struct *tsk, int cpu);
+		      struct task_struct *tsk);

 #define deactivate_mm(tsk,mm)	do { } while (0)

-#define activate_mm(prev,next) switch_mm((prev),(next),NULL,smp_processor_id())
+#define activate_mm(prev,next) switch_mm((prev),(next),NULL)

 /* current active pgd - this is similar to other processors pgd
  * registers like cr3 on the i386
@@ -17,7 +17,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -6,7 +6,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -20,7 +20,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 #define destroy_context(mm)		do { } while(0)
 #define deactivate_mm(tsk,mm)		do { } while(0)

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {
 }
@@ -14,16 +14,21 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
+	unsigned cpu = smp_processor_id();
 	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 		cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
 #endif
 }

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
 {
+	int cpu = smp_processor_id();
+
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
 		clear_bit(cpu, &prev->cpu_vm_mask);
@@ -62,6 +67,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 		asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))

 #define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL,smp_processor_id())
+	switch_mm((prev),(next),NULL)

 #endif
@@ -71,7 +71,7 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 extern void wrap_mmu_context (struct mm_struct *mm);

 static inline void
-enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -198,7 +198,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)
 	activate_context(next);
 }

-#define switch_mm(prev_mm,next_mm,next_task,cpu)	activate_mm(prev_mm, next_mm)
+#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

 # endif /* ! __ASSEMBLY__ */
 #endif /* _ASM_IA64_MMU_CONTEXT_H */
@@ -3,7 +3,7 @@

 #include <linux/config.h>

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -79,7 +79,7 @@ extern inline void switch_mm_0460(struct mm_struct *mm)
 	asm volatile (".chip 68k");
 }

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {
 	if (prev != next) {
 		if (CPU_IS_020_OR_030)
@@ -137,7 +137,7 @@ static inline void activate_context(struct mm_struct *mm)
 	sun3_put_context(mm->context);
 }

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {
 	activate_context(tsk->mm);
 }
@@ -6,7 +6,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -19,7 +19,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 #define destroy_context(mm)		do { } while(0)

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {
 }
@@ -49,7 +49,7 @@ extern unsigned long pgd_current[];
 #define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -92,9 +92,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 }

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk, unsigned cpu)
+			     struct task_struct *tsk)
 {
 	unsigned long flags;
+	unsigned cpu = smp_processor_id();

 	local_irq_save(flags);
@@ -40,7 +40,7 @@ extern unsigned long pgd_current[];
 #define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -6,7 +6,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -43,7 +43,7 @@ static inline void load_context(mm_context_t context)
 #endif
 }

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {

 	if (prev != next) {
@@ -69,6 +69,6 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	if (next->context == 0)
 		next->context = alloc_sid();

-	switch_mm(prev,next,current,0);
+	switch_mm(prev,next,current);
 }
 #endif
@@ -48,7 +48,7 @@
    -- Dan
  */

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -153,7 +153,7 @@ static inline void destroy_context(struct mm_struct *mm)
 }

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk, int cpu)
+			     struct task_struct *tsk)
 {
 	tsk->thread.pgdir = next->pgd;
 	get_mmu_context(next);
@@ -56,7 +56,7 @@ struct mmu_context_queue_t {
 extern struct mmu_context_queue_t mmu_context_queue;

 static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -140,10 +140,10 @@ extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
  */
 static inline void
 switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk, int cpu)
+	  struct task_struct *tsk)
 {
 	flush_stab(tsk, next);
-	set_bit(cpu, &next->cpu_vm_mask);
+	set_bit(smp_processor_id(), &next->cpu_vm_mask);
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -153,7 +153,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
  * the context for the new mm so we see the new mappings.
  */
 #define activate_mm(active_mm, mm) \
-	switch_mm(active_mm, mm, current, smp_processor_id());
+	switch_mm(active_mm, mm, current);

 #define VSID_RANDOMIZER	42470972311
 #define VSID_MASK	0xfffffffff
@@ -17,12 +17,12 @@
 #define destroy_context(mm)	flush_tlb_mm(mm)

 static inline void enter_lazy_tlb(struct mm_struct *mm,
-				  struct task_struct *tsk, unsigned cpu)
+				  struct task_struct *tsk)
 {
 }

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk, unsigned cpu)
+			     struct task_struct *tsk)
 {
 	unsigned long pgd;
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			      : : "m" (pgd) );
 #endif /* __s390x__ */
 	}
-	set_bit(cpu, &next->cpu_vm_mask);
+	set_bit(smp_processor_id(), &next->cpu_vm_mask);
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 extern inline void activate_mm(struct mm_struct *prev,
 			       struct mm_struct *next)
 {
-	switch_mm(prev, next, current, smp_processor_id());
+	switch_mm(prev, next, current);
 }

 #endif
@@ -129,7 +129,7 @@ static __inline__ void activate_context(struct mm_struct *mm)
    (Currently not used) */
 static __inline__ void switch_mm(struct mm_struct *prev,
 				 struct mm_struct *next,
-				 struct task_struct *tsk, unsigned int cpu)
+				 struct task_struct *tsk)
 {
 	if (likely(prev != next)) {
 		unsigned long __pgdir = (unsigned long)next->pgd;
@@ -144,10 +144,10 @@ static __inline__ void switch_mm(struct mm_struct *prev,
 #define deactivate_mm(tsk,mm)	do { } while (0)

 #define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL,smp_processor_id())
+	switch_mm((prev),(next),NULL)

 static __inline__ void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 #else /* !CONFIG_MMU */
@@ -157,10 +157,10 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 #define set_asid(asid)		do { } while (0)
 #define get_asid()		(0)
 #define activate_context(mm)	do { } while (0)
-#define switch_mm(prev,next,tsk,cpu)	do { } while (0)
+#define switch_mm(prev,next,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	do { } while (0)
-#define enter_lazy_tlb(mm,tsk,cpu)	do { } while (0)
+#define enter_lazy_tlb(mm,tsk)	do { } while (0)
 #endif /* CONFIG_MMU */

 #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
@@ -5,7 +5,7 @@

 #ifndef __ASSEMBLY__

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -26,14 +26,14 @@ BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
 #define destroy_context(mm)	BTFIXUP_CALL(destroy_context)(mm)

 /* Switch the current MM context. */
-BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *, int)
-#define switch_mm(old_mm, mm, tsk, cpu) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk, cpu)
+BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
+#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)

 #define deactivate_mm(tsk,mm)	do { } while (0)

 /* Activate a new MM instance for the current task. */
-#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL, smp_processor_id())
+#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)

 #endif /* !(__ASSEMBLY__) */
@@ -27,7 +27,7 @@
 #include <asm/system.h>
 #include <asm/spitfire.h>

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
@@ -106,7 +106,7 @@ do { \
 extern void __flush_tlb_mm(unsigned long, unsigned long);

 /* Switch the current MM context. */
-static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
+static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	}

 	{
-		unsigned long vm_mask = (1UL << cpu);
+		unsigned long vm_mask = (1UL << smp_processor_id());

 		/* Even if (mm == old_mm) we _must_ check
 		 * the cpu_vm_mask.  If we do not we could
@@ -21,8 +21,10 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 extern void switch_mm_skas(int mm_fd);

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk, unsigned cpu)
+			     struct task_struct *tsk)
 {
+	unsigned cpu = smp_processor_id();
+
 	if(prev != next){
 		clear_bit(cpu, &prev->cpu_vm_mask);
 		set_bit(cpu, &next->cpu_vm_mask);
@@ -33,7 +35,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }

 static inline void enter_lazy_tlb(struct mm_struct *mm,
-				  struct task_struct *tsk, unsigned cpu)
+				  struct task_struct *tsk)
 {
 }
@@ -3,9 +3,9 @@

 #define destroy_context(mm)		((void)0)
 #define init_new_context(tsk,mm)	0
-#define switch_mm(prev,next,tsk,cpu)	((void)0)
+#define switch_mm(prev,next,tsk)	((void)0)
 #define deactivate_mm(tsk,mm)		do { } while (0)
 #define activate_mm(prev,next)		((void)0)
-#define enter_lazy_tlb(mm,tsk,cpu)	((void)0)
+#define enter_lazy_tlb(mm,tsk)		((void)0)

 #endif /* __V850_MMU_CONTEXT_H__ */
@@ -17,20 +17,21 @@ void destroy_context(struct mm_struct *mm);

 #ifdef CONFIG_SMP

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		write_pda(mmu_state, TLBSTATE_LAZY);
 }
 #else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 #endif

 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk, unsigned cpu)
+			     struct task_struct *tsk)
 {
+	unsigned cpu = smp_processor_id();
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
 		clear_bit(cpu, &prev->cpu_vm_mask);
@@ -68,7 +69,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	} while(0)

 #define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL,smp_processor_id())
+	switch_mm((prev),(next),NULL)

 #endif
@@ -443,7 +443,7 @@ static inline void __exit_mm(struct task_struct * tsk)
 	/* more a memory barrier than a real lock */
 	task_lock(tsk);
 	tsk->mm = NULL;
-	enter_lazy_tlb(mm, current, smp_processor_id());
+	enter_lazy_tlb(mm, current);
 	task_unlock(tsk);
 	mmput(mm);
 }
@@ -646,9 +646,9 @@ static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next
 	if (unlikely(!mm)) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
-		enter_lazy_tlb(oldmm, next, smp_processor_id());
+		enter_lazy_tlb(oldmm, next);
 	} else
-		switch_mm(oldmm, mm, next, smp_processor_id());
+		switch_mm(oldmm, mm, next);

 	if (unlikely(!prev->mm)) {
 		prev->active_mm = NULL;
@@ -2528,7 +2528,7 @@ void __init sched_init(void)
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
 	atomic_inc(&init_mm.mm_count);
-	enter_lazy_tlb(&init_mm, current, smp_processor_id());
+	enter_lazy_tlb(&init_mm, current);
 }

 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP