Commit 99ef44b7 authored by Linus Torvalds, committed by Linus Torvalds

Clean up %cr3 loading on x86, fix lazy TLB problem

parent 6c52c43c
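In short: every open-coded "movl %0,%%cr3" is replaced by a single load_cr3() macro, and the lazy-TLB fix makes a CPU that leaves an mm point %cr3 at swapper_pg_dir, so the old page tables can be freed safely. A minimal sketch of the cleanup half, using only names that appear in the diff below (kernel context assumed; this is not a standalone program):

	/* Before: each call site open-coded the inline asm. */
	asm volatile("movl %0,%%cr3": :"r" (__pa(swapper_pg_dir)));

	/* After: one macro in the processor header... */
	#define load_cr3(pgdir) \
		asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))

	/* ...and every call site becomes a single readable line. */
	load_cr3(swapper_pg_dir);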
@@ -321,7 +321,7 @@ void machine_real_restart(unsigned char *code, int length)
 	/*
 	 * Use `swapper_pg_dir' as our page directory.
 	 */
-	asm volatile("movl %0,%%cr3": :"r" (__pa(swapper_pg_dir)));
+	load_cr3(swapper_pg_dir);
 
 	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
 	   this on booting to tell it to "Bypass memory test (also warm
@@ -299,12 +299,16 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
+ *
+ * We need to reload %cr3 since the page tables may be going
+ * away from under us..
  */
 static void inline leave_mm (unsigned long cpu)
 {
 	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 		BUG();
 	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	load_cr3(swapper_pg_dir);
 }
 
 /*
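For context on how leave_mm() is reached: it runs from the TLB-flush IPI handler when the receiving CPU is in lazy TLB mode, i.e. still carrying another task's mm without actively using it. A simplified, illustrative sketch (the handler name and the TLBSTATE_LAZY check are assumptions about the surrounding code, not part of this diff):

	/* Illustrative sketch only: a lazy-TLB CPU leaves the mm instead of
	 * flushing. With the load_cr3(swapper_pg_dir) added above, it also
	 * stops walking page tables that the IPI sender may be freeing. */
	static void tlb_flush_ipi_sketch(unsigned long cpu)	/* hypothetical name */
	{
		if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
			leave_mm(cpu);		/* drop out of the mm entirely */
		else
			local_flush_tlb();	/* mm is in active use: flush */
	}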
@@ -307,7 +307,7 @@ void __init paging_init(void)
 {
 	pagetable_init();
 
-	__asm__( "movl %0,%%cr3\n" ::"r"(__pa(swapper_pg_dir)));
+	load_cr3(swapper_pg_dir);
 
 #if CONFIG_X86_PAE
 	/*
@@ -38,7 +38,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 		set_bit(cpu, &next->cpu_vm_mask);
 
 		/* Re-load page tables */
-		asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
+		load_cr3(next->pgd);
 
 		/* load_LDT, if either the previous or next thread
 		 * has a non-default LDT.
@@ -53,9 +53,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 			BUG();
 		if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must flush our tlb.
+			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
-			local_flush_tlb();
+			load_cr3(next->pgd);
 			load_LDT(&next->context);
 		}
 	}
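The second hunk above is the heart of the lazy-TLB fix. Once leave_mm() loads swapper_pg_dir, a CPU re-entering the mm holds the wrong page directory in %cr3, so local_flush_tlb() alone cannot help; it must reload %cr3 with next->pgd, and on x86 writing %cr3 flushes the non-global TLB entries anyway. The sequence, sketched as a timeline:

	/*
	 * CPU0 (tearing down mm)        CPU1 (lazy TLB on the same mm)
	 * ----------------------        ------------------------------
	 * frees page tables,
	 * sends flush IPI          -->  leave_mm():
	 *                                 clears its cpu_vm_mask bit
	 *                                 load_cr3(swapper_pg_dir)
	 *                                 (no further flush IPIs arrive)
	 * ...                           later, switch_mm() back to mm:
	 *                                 %cr3 still = swapper_pg_dir, so
	 *                                 local_flush_tlb() changes nothing;
	 *                                 load_cr3(next->pgd) is required
	 *                                 (and it flushes the TLB too).
	 */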
@@ -173,6 +173,10 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
+
+#define load_cr3(pgdir) \
+	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
+
 /*
  * Intel CPU features in CR4
  */
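One detail worth keeping in mind when using the macro: %cr3 holds the physical address of the page directory, while pgdir is a kernel-virtual pointer, which is why load_cr3() applies __pa() before the movl. Usage matches the call sites converted above:

	load_cr3(swapper_pg_dir);	/* kernel's reference page directory */
	load_cr3(next->pgd);		/* page directory of the next mm */

Centralizing the asm in one place also removes small divergences between the old call sites: one used __asm__ with a stray trailing \n, the others asm volatile.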