Commit 0adb24e0 authored by John David Anglin, committed by Helge Deller

parisc: Fix ordering of cache and TLB flushes

The change to flush_kernel_vmap_range() wasn't sufficient to avoid the
SMP stalls.  The problem is that some drivers call these routines with
interrupts disabled, while interrupts need to be enabled for
flush_tlb_all() and flush_cache_all() to work.  This version adds checks
to ensure interrupts are not disabled before calling the routines that
need IPIs.  When interrupts are disabled, we now drop into slower,
range-based code.
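
For illustration, the guard used throughout the patch looks like this (a
minimal sketch taken from the __flush_tlb_range() hunk below):

	/*
	 * flush_tlb_all() and flush_cache_all() send IPIs on SMP, and those
	 * IPIs cannot be serviced while the caller runs with interrupts
	 * disabled.  Only take the whole-TLB/whole-cache fast path when it
	 * is safe to issue IPIs; otherwise fall through to the slower
	 * range-based flush.
	 */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}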

The attached change fixes the ordering of cache and TLB flushes in
several cases.  When we flush the cache using the existing PTE/TLB
entries, we need to flush the TLB after doing the cache flush.  We don't
need to do this when we flush the entire instruction and data caches as
these flushes don't use the existing TLB entries.  The same is true for
tmpalias region flushes.

The flush_kernel_vmap_range() and invalidate_kernel_vmap_range()
routines have been updated.
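
The resulting order in flush_kernel_vmap_range() (see the hunk below)
flushes the cache through the still-valid mapping first and only then
invalidates the TLB entries:

	flush_kernel_dcache_range_asm(start, end);	/* uses the existing TLB entries */
	flush_tlb_kernel_range(start, end);		/* invalidate them only afterwards */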

Secondly, we added a new purge_kernel_dcache_range_asm() routine to
pacache.S and use it in invalidate_kernel_vmap_range().  Nominally,
purges are faster than flushes as the cache lines don't have to be
written back to memory.
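
The loop in the new routine mirrors flush_kernel_dcache_range_asm(),
swapping one cache-control instruction: on PA-RISC, fdc writes a dirty
line back to memory before invalidating it, while pdc invalidates the
line outright:

	fdc,m	%r23(%r26)	/* flush: write back if dirty, then invalidate */
	pdc,m	%r23(%r26)	/* purge: invalidate without writeback */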

Hopefully, this is sufficient to resolve the remaining problems due to
cache speculation.  So far, testing indicates that this is the case.  I
did work up a patch using tmpalias flushes, but there is a performance
hit because we need the physical address for each page, and we also need
to sequence access to the tmpalias flush code.  This increases the
probability of stalls.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.9+
Signed-off-by: Helge Deller <deller@gmx.de>
parent d8a5b805
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
 		      unsigned long end)
 {
-	unsigned long flags, size;
+	unsigned long flags;
 
-	size = (end - start);
-	if (size >= parisc_tlb_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_tlb_flush_threshold) {
 		flush_tlb_all();
 		return 1;
 	}
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	pgd_t *pgd;
 
-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
-		flush_tlb_all();
-
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
 	if (mm->context == mfsp(3)) {
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if ((vma->vm_flags & VM_EXEC) == 0)
-				continue;
-			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			if (vma->vm_flags & VM_EXEC)
+				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 		}
 		return;
 	}
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	BUG_ON(!vma->vm_mm->context);
-
-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
-		flush_tlb_range(vma, start, end);
-
-	if ((end - start) >= parisc_cache_flush_threshold
-	    || vma->vm_mm->context != mfsp(3)) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_cache_flush_threshold) {
+		flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
 }
 
 void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 	BUG_ON(!vma->vm_mm->context);
 
 	if (pfn_valid(pfn)) {
-		if (parisc_requires_coherency())
-			flush_tlb_page(vma, vmaddr);
+		flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
 
-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}
+
+	flush_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
 
-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}
+
+	purge_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	.procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
 
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:	cmpb,COND(<<),n	%r26, %r25, 1b
+	pdc,m		%r23(%r26)
+
+	sync
+	syncdma
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
 	.proc
 	.callinfo NO_CALLS
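
For context on how callers pair these routines around DMA (a hedged
sketch; the buffer and length names are hypothetical, not from this
patch):

	/* CPU wrote to a vmap'd buffer the device is about to read:
	 * push dirty lines out to memory first. */
	flush_kernel_vmap_range(vbuf, len);

	/* ... device DMA runs here ... */

	/* Device wrote to memory the CPU is about to read: discard any
	 * stale cached lines so the CPU sees the DMA'd data. */
	invalidate_kernel_vmap_range(vbuf, len);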