Commit 5fbdefcf authored by Linus Torvalds

Merge branch 'parisc-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc fixes from Helge Deller:

 - a patch to change the ordering of cache and TLB flushes to hopefully
   fix the random segfaults we very rarely face (by Dave Anglin); a
   sketch of the corrected ordering follows this list.

 - a patch to hide the virtual kernel memory layout from userspace for
   security reasons.

 - two small patches to make the kernel run more smoothly under qemu.
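
Editor's note: Dave's reordering, condensed from the flush_cache_range() hunk in
arch/parisc/kernel/cache.c below. The fdc/fic flush instructions operate on
virtual addresses, so the cache must be flushed while the translations are
still present; only afterwards may the TLB entries be purged. A minimal
sketch of the resulting pattern (the large-range shortcut is omitted here):

    /* Condensed from the flush_cache_range() hunk below:
     * cache first, TLB purge last. */
    void flush_cache_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
    {
            flush_user_dcache_range_asm(start, end);        /* 1. flush dcache */
            if (vma->vm_flags & VM_EXEC)
                    flush_user_icache_range_asm(start, end);/* 2. flush icache */
            flush_tlb_range(vma, start, end);               /* 3. purge TLB    */
    }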

* 'parisc-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Reduce irq overhead when run in qemu
  parisc: Use cr16 interval timers unconditionally on qemu
  parisc: Check if secondary CPUs want own PDC calls
  parisc: Hide virtual kernel memory layout
  parisc: Fix ordering of cache and TLB flushes
parents 0573fed9 636a415b
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
...
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
 #define parisc_requires_coherency()	(0)
 #endif
 
+extern int running_on_qemu;
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PARISC_PROCESSOR_H */
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
 		      unsigned long end)
 {
-	unsigned long flags, size;
+	unsigned long flags;
 
-	size = (end - start);
-	if (size >= parisc_tlb_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_tlb_flush_threshold) {
 		flush_tlb_all();
 		return 1;
 	}
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	pgd_t *pgd;
 
-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
-		flush_tlb_all();
-
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
 	if (mm->context == mfsp(3)) {
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if ((vma->vm_flags & VM_EXEC) == 0)
-				continue;
-			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			if (vma->vm_flags & VM_EXEC)
+				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 		}
 		return;
 	}
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	BUG_ON(!vma->vm_mm->context);
-
-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_cache_flush_threshold) {
 		flush_tlb_range(vma, start, end);
-
-	if ((end - start) >= parisc_cache_flush_threshold
-	    || vma->vm_mm->context != mfsp(3)) {
 		flush_cache_all();
 		return;
 	}
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
 }
 
 void
@@ -604,7 +598,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 	BUG_ON(!vma->vm_mm->context);
 
 	if (pfn_valid(pfn)) {
-		if (parisc_requires_coherency())
-			flush_tlb_page(vma, vmaddr);
+		flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
 
-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}
+
+	flush_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
 
-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}
+
+	purge_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
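
Editor's note: two details in the cache.c hunks above are easy to miss. The new
(!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) guard exists because
flush_tlb_all() and flush_cache_all() are broadcast to the other CPUs on SMP;
issuing such a cross-CPU call with local interrupts disabled risks deadlock,
so the whole-cache/whole-TLB shortcut is only taken when that is safe. And
invalidate_kernel_vmap_range() now uses the new purge primitive, which
discards cache lines without writing dirty data back, the right semantics
when the underlying memory was just filled by DMA. A sketch of the guard as
a predicate (the helper name is ours, not the patch's):

    #include <linux/kernel.h>
    #include <linux/irqflags.h>

    /* Hypothetical helper illustrating the guard used throughout the
     * cache.c hunks above: global flushes IPI the other CPUs on SMP,
     * so they may only be used while local interrupts are enabled. */
    static inline bool global_flush_is_safe(void)
    {
            return !IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled();
    }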
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -138,6 +138,16 @@ $pgt_fill_loop:
 	std		%dp,0x18(%r10)
 #endif
 
+#ifdef CONFIG_64BIT
+	/* Get PDCE_PROC for monarch CPU. */
+#define MEM_PDC_LO 0x388
+#define MEM_PDC_HI 0x35C
+	ldw		MEM_PDC_LO(%r0),%r3
+	ldw		MEM_PDC_HI(%r0),%r10
+	depd		%r10, 31, 32, %r3	/* move to upper word */
+#endif
+
+
 #ifdef CONFIG_SMP
 	/* Set the smp rendezvous address into page zero.
 	** It would be safer to do this in init_smp_config() but
@@ -196,12 +206,6 @@ common_stext:
 	** Someday, palo might not do this for the Monarch either.
 	*/
 2:
-#define MEM_PDC_LO 0x388
-#define MEM_PDC_HI 0x35C
-	ldw		MEM_PDC_LO(%r0),%r3
-	ldw		MEM_PDC_HI(%r0),%r6
-	depd		%r6, 31, 32, %r3	/* move to upper word */
-
 	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
 
 	ldo		PDC_PSW(%r0),%arg0	/* 21 */
@@ -268,6 +272,8 @@ $install_iva:
 aligned_rfi:
 	pcxt_ssm_bug
 
+	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */
+
 	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
 	/* Don't need NOPs, have 8 compliant insn before rfi */
...
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	.procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
 
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:	cmpb,COND(<<),n	%r26, %r25, 1b
+	pdc,m		%r23(%r26)
+
+	sync
+	syncdma
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
 	.proc
 	.callinfo NO_CALLS
...
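
Editor's note, for readers who do not speak PA-RISC assembly: the new routine
aligns the start address down to a cache-line boundary and then walks the
range one line at a time with pdc (purge data cache), which invalidates
without writeback, unlike fdc, which writes dirty lines back first. Roughly,
in C (pdc_line() is a stand-in for the pdc,m instruction; dcache_stride as
in the surrounding code):

    /* Rough C model of purge_kernel_dcache_range_asm() above. */
    static void purge_kernel_dcache_range(unsigned long start, unsigned long end)
    {
            unsigned long stride = dcache_stride;   /* per-machine line stride */

            start &= ~(stride - 1);         /* ldo -1 + ANDCM: align down     */
            while (start < end) {           /* cmpb,<< loop                   */
                    pdc_line(start);        /* pdc: discard line, no writeback */
                    start += stride;        /* ,m: post-increment by stride   */
            }
            /* the asm finishes with sync/syncdma before returning */
    }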
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -292,10 +292,15 @@ smp_cpu_init(int cpunum)
  * Slaves start using C here. Indirectly called from smp_slave_stext.
  * Do what start_kernel() and main() do for boot strap processor (aka monarch)
  */
-void __init smp_callin(void)
+void __init smp_callin(unsigned long pdce_proc)
 {
 	int slave_id = cpu_now_booting;
 
+#ifdef CONFIG_64BIT
+	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
+			| PAGE0->mem_pdc) != pdce_proc);
+#endif
+
 	smp_cpu_init(slave_id);
 	preempt_disable();
...
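
Editor's note: the WARN_ON is a cross-check. head.S loads the 64-bit PDCE_PROC
firmware entry point from the two 32-bit words at page-zero offsets
0x35C/0x388, merges them with depd, and hands the value to smp_callin() in
%arg0; smp_callin() rebuilds the same address from PAGE0 and compares. Both
sides compute, in effect (a sketch, field names as in the hunk above):

    /* How the 64-bit PDCE_PROC address is assembled from page zero,
     * mirroring both the ldw/ldw/depd sequence in head.S and the
     * WARN_ON() expression above. */
    static inline unsigned long pdce_proc_addr(unsigned long mem_pdc_hi,
                                               unsigned long mem_pdc)
    {
            /* depd %hi, 31, 32, %lo: deposit hi word into bits 63..32 */
            return (mem_pdc_hi << 32) | mem_pdc;
    }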
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	next_tick = cpuinfo->it_value;
 
 	/* Calculate how many ticks have elapsed. */
+	now = mfctl(16);
 	do {
 		++ticks_elapsed;
 		next_tick += cpt;
-		now = mfctl(16);
 	} while (next_tick - now > cpt);
 
 	/* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 * if one or the other wrapped. If "now" is "bigger" we'll end up
 	 * with a very large unsigned number.
 	 */
-	while (next_tick - mfctl(16) > cpt)
+	now = mfctl(16);
+	while (next_tick - now > cpt)
 		next_tick += cpt;
 
 	/* Program the IT when to deliver the next interrupt.
 	 * Only bottom 32-bits of next_tick are writable in CR16!
 	 * Timer interrupt will be delivered at least a few hundred cycles
-	 * after the IT fires, so if we are too close (<= 500 cycles) to the
+	 * after the IT fires, so if we are too close (<= 8000 cycles) to the
 	 * next cycle, simply skip it.
 	 */
-	if (next_tick - mfctl(16) <= 500)
+	if (next_tick - now <= 8000)
 		next_tick += cpt;
 
 	mtctl(next_tick, 16);
@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
 	 * different sockets, so mark them unstable and lower rating on
 	 * multi-socket SMP systems.
 	 */
-	if (num_online_cpus() > 1) {
+	if (num_online_cpus() > 1 && !running_on_qemu) {
 		int cpu;
 		unsigned long cpu0_loc;
 		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
...
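
Editor's note: the timer changes lean on unsigned modular arithmetic:
next_tick - now is computed modulo 2^32, so the comparisons stay correct even
when the free-running cr16 counter wraps. Reading cr16 once into now, instead
of calling mfctl(16) inside the loops, saves repeated control-register reads
(expensive under qemu) and makes the catch-up test and the too-close test
agree on a single timestamp. A compact model of that logic, using 32-bit
arithmetic as the "Only bottom 32-bits" comment suggests:

    #include <stdint.h>

    /* Model of the catch-up logic in timer_interrupt() above: advance
     * next_tick in whole periods (cpt) until it lies ahead of 'now';
     * unsigned subtraction keeps this correct across cr16 wraparound. */
    static uint32_t next_interrupt(uint32_t next_tick, uint32_t now, uint32_t cpt)
    {
            while (next_tick - now > cpt)   /* wraps safely modulo 2^32   */
                    next_tick += cpt;
            if (next_tick - now <= 8000)    /* too close for the IT to fire */
                    next_tick += cpt;       /* skip one tick              */
            return next_tick;               /* programmed via mtctl(.., 16) */
    }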
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -629,7 +629,12 @@ void __init mem_init(void)
 #endif
 	mem_init_print_info(NULL);
 
-#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
+#if 0
+	/*
+	 * Do not expose the virtual kernel memory layout to userspace.
+	 * But keep code for debugging purposes.
+	 */
 	printk("virtual kernel memory layout:\n"
 	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
 	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
...