Commit 4f193867 authored by Sven Schnelle, committed by Helge Deller

parisc: deduplicate code in flush_cache_mm() and flush_cache_range()

Parts of both functions are the same, so deduplicate them. No functional
change.
Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Helge Deller <deller@gmx.de>
parent a5e8ca37
@@ -543,10 +543,33 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 	return ptep;
 }
 
+static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
+			      unsigned long start, unsigned long end)
+{
+	unsigned long addr, pfn;
+	pte_t *ptep;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		ptep = get_ptep(mm->pgd, addr);
+		if (ptep) {
+			pfn = pte_pfn(*ptep);
+			flush_cache_page(vma, addr, pfn);
+		}
+	}
+}
+
+static void flush_user_cache_tlb(struct vm_area_struct *vma,
+				 unsigned long start, unsigned long end)
+{
+	flush_user_dcache_range_asm(start, end);
+	if (vma->vm_flags & VM_EXEC)
+		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
-	pgd_t *pgd;
 
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc. So, avoid it if the mm isn't too big. */
@@ -560,46 +583,20 @@ void flush_cache_mm(struct mm_struct *mm)
 
 	preempt_disable();
 	if (mm->context == mfsp(3)) {
-		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
-			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
-		}
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
 		preempt_enable();
 		return;
 	}
 
-	pgd = mm->pgd;
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		unsigned long addr;
-
-		for (addr = vma->vm_start; addr < vma->vm_end;
-		     addr += PAGE_SIZE) {
-			unsigned long pfn;
-			pte_t *ptep = get_ptep(pgd, addr);
-			if (!ptep)
-				continue;
-			pfn = pte_pfn(*ptep);
-			if (!pfn_valid(pfn))
-				continue;
-			if (unlikely(mm->context)) {
-				flush_tlb_page(vma, addr);
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			} else {
-				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
 	preempt_enable();
 }
 
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	pgd_t *pgd;
-	unsigned long addr;
-
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
 		if (vma->vm_mm->context)
@@ -610,30 +607,12 @@ void flush_cache_range(struct vm_area_struct *vma,
 
 	preempt_disable();
 	if (vma->vm_mm->context == mfsp(3)) {
-		flush_user_dcache_range_asm(start, end);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_range_asm(start, end);
-		flush_tlb_range(vma, start, end);
+		flush_user_cache_tlb(vma, start, end);
 		preempt_enable();
 		return;
 	}
 
-	pgd = vma->vm_mm->pgd;
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
-		unsigned long pfn;
-		pte_t *ptep = get_ptep(pgd, addr);
-		if (!ptep)
-			continue;
-		pfn = pte_pfn(*ptep);
-		if (pfn_valid(pfn)) {
-			if (unlikely(vma->vm_mm->context)) {
-				flush_tlb_page(vma, addr);
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			} else {
-				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
 
 	preempt_enable();
 }
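
For orientation, this is roughly how flush_cache_range() reads once both hunks are applied, assembled from the context and added lines above. The fast path for ranges above parisc_cache_flush_threshold is unchanged by this commit and elided here, and the comments are added for explanation only:

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	/* ... unchanged fast path for large ranges elided ... */

	preempt_disable();
	if (vma->vm_mm->context == mfsp(3)) {
		/* The mm is the currently active context: flush by virtual range. */
		flush_user_cache_tlb(vma, start, end);
		preempt_enable();
		return;
	}

	/* Otherwise walk the page tables and flush the mapped pages one by one. */
	flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);

	preempt_enable();
}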