Commit ae7a609c authored by John David Anglin, committed by Helge Deller

parisc: Prevent TLB speculation on flushed pages on CPUs that only support equivalent aliases

Helge noticed that we flush the TLB page in flush_cache_page but not in
flush_cache_range or flush_cache_mm.
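
For reference, the pre-patch flush_cache_page() looked roughly like this (reconstructed from the context and removed lines of the diff below); note the unconditional TLB flush that the other two routines lacked:

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
	unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		/* Unconditional before this patch; made conditional on
		   parisc_requires_coherency() by the hunk below. */
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}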

For a long time, we have had random segmentation faults building
packages on machines with PA8800/8900 processors.  These machines only
support equivalent aliases.  We don't see these faults on machines that
don't require strict coherency.  So, it appears TLB speculation
sometimes leads to cache corruption on machines that require coherency.

This patch adds TLB flushes to flush_cache_range and flush_cache_mm when
coherency is required.  We only flush the TLB in flush_cache_page when
coherency is required.
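
The "coherency is required" test is the existing parisc_requires_coherency() helper, which is true only on the PA8800/PA8900 ("mako"/"mako2") CPUs mentioned above. A sketch, modeled on the helper in the parisc headers of this era and shown for reference only, not part of this patch:

/* Sketch of the existing coherency check (not part of the diff below).
   PA8800/PA8900 are the "mako"/"mako2" CPU types, the machines that
   only support equivalent aliases and so require strict coherency. */
static inline int parisc_requires_coherency(void)
{
#ifdef CONFIG_PA8X00
	return (boot_cpu_data.cpu_type == mako) ||
		(boot_cpu_data.cpu_type == mako2);
#else
	return 0;
#endif
}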

The patch also optimizes flush_cache_range.  It turns out we always have
the right context to use flush_user_dcache_range_asm and
flush_user_icache_range_asm.
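
Here "the right context" means that space register %sr3, read with mfsp(3), matches vma->vm_mm->context whenever flush_cache_range() runs, so the old fallback page-table walk is dead code and the equality can become a BUG_ON. A simplified sketch of the mfsp() space-register read, modeled on the parisc asm headers (an illustration, not a verbatim quote):

/* Sketch only (assumption: simplified from the parisc asm headers).
   mfsp(3) reads space register %sr3, which the kernel keeps loaded
   with the user space id of the currently running context. */
#define mfsp(reg)	({			\
	unsigned long cr;			\
	__asm__ __volatile__(			\
		"mfsp %%sr%1,%0"		\
		: "=r" (cr) : "i"(reg)		\
	);					\
	cr;					\
})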

The patch has been tested for some time on rp3440, rp3410 and A500-44.
It's been boot tested on c8000.  No random segmentation faults were
observed during testing.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.9+
Signed-off-by: Helge Deller <deller@gmx.de>
parent 56188832
@@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	pgd_t *pgd;
 
+	/* Flush the TLB to avoid speculation if coherency is required. */
+	if (parisc_requires_coherency())
+		flush_tlb_all();
+
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
@@ -577,33 +581,22 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	unsigned long addr;
-	pgd_t *pgd;
-
 	BUG_ON(!vma->vm_mm->context);
 
+	/* Flush the TLB to avoid speculation if coherency is required. */
+	if (parisc_requires_coherency())
+		flush_tlb_range(vma, start, end);
+
 	if ((end - start) >= parisc_cache_flush_threshold) {
 		flush_cache_all();
 		return;
 	}
 
-	if (vma->vm_mm->context == mfsp(3)) {
-		flush_user_dcache_range_asm(start, end);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_range_asm(start, end);
-		return;
-	}
+	BUG_ON(vma->vm_mm->context != mfsp(3));
 
-	pgd = vma->vm_mm->pgd;
-	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		unsigned long pfn;
-		pte_t *ptep = get_ptep(pgd, addr);
-		if (!ptep)
-			continue;
-		pfn = pte_pfn(*ptep);
-		if (pfn_valid(pfn))
-			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-	}
+	flush_user_dcache_range_asm(start, end);
+	if (vma->vm_flags & VM_EXEC)
+		flush_user_icache_range_asm(start, end);
 }
 
 void
@@ -612,7 +605,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 	BUG_ON(!vma->vm_mm->context);
 
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
+		if (parisc_requires_coherency())
+			flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }