Commit e2c6ee0a authored by Russell King

[ARM] Use cpu_vm_mask to determine whether to flush TLB/caches.

Since bit 0 is only set when the MM is mapped onto the CPU, we can use
this rather than comparing the MM pointer with current->active_mm.
This simplifies the inline cache flushing and tlb code.
parent a6a4b52e
...@@ -256,14 +256,14 @@ extern void dmac_flush_range(unsigned long, unsigned long);
static inline void flush_cache_mm(struct mm_struct *mm) static inline void flush_cache_mm(struct mm_struct *mm)
{ {
if (current->active_mm == mm) if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
__cpuc_flush_user_all(); __cpuc_flush_user_all();
} }
static inline void static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{ {
if (current->active_mm == vma->vm_mm) if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags); vma->vm_flags);
} }
...@@ -271,7 +271,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
static inline void static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{ {
if (current->active_mm == vma->vm_mm) { if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
unsigned long addr = user_addr & PAGE_MASK; unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
} }
......
...@@ -262,7 +262,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB)) if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (mm == current->active_mm) { if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
if (tlb_flag(TLB_V3_FULL)) if (tlb_flag(TLB_V3_FULL))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero)); asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_U_FULL)) if (tlb_flag(TLB_V4_U_FULL))
...@@ -292,7 +292,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB)) if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (vma->vm_mm == current->active_mm) { if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
if (tlb_flag(TLB_V3_PAGE)) if (tlb_flag(TLB_V3_PAGE))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr)); asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
if (tlb_flag(TLB_V4_U_PAGE)) if (tlb_flag(TLB_V4_U_PAGE))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment