Commit fedb8da9 authored by John David Anglin, committed by Helge Deller

parisc: Define mb() and add memory barriers to assembler unlock sequences

For years I thought all parisc machines executed loads and stores in
order. However, Jeff Law recently indicated on gcc-patches that this is
not correct. There are various degrees of out-of-order execution all the
way back to the PA7xxx processor series (hit-under-miss). The PA8xxx
series has full out-of-order execution for both integer operations and
loads and stores.

This is described in the following article:
http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml

For this reason, we need to define mb() and insert a memory barrier
before the store that unlocks spinlocks. This ensures that all memory
accesses are complete prior to unlocking. The ldcw instruction performs
the same barrier function on entry.
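
To illustrate the ordering requirement (a minimal C11 sketch of the idea,
not the kernel's parisc implementation; the type and function names below
are invented):

    #include <stdatomic.h>

    /* Hypothetical ldcw-style spinlock word: 1 = free, 0 = held.
     * ldcw atomically loads the word and clears it, so a nonzero
     * result means the lock was taken; it also orders lock entry. */
    typedef struct { atomic_int lock; } sketch_spinlock_t;

    static void sketch_lock(sketch_spinlock_t *s)
    {
        /* stands in for the ldcw spin loop; acquire on entry */
        while (atomic_exchange_explicit(&s->lock, 0,
                                        memory_order_acquire) == 0)
            ;
    }

    static void sketch_unlock(sketch_spinlock_t *s)
    {
        /* the added "sync": complete prior accesses first... */
        atomic_thread_fence(memory_order_release);
        /* ...then the plain store that frees the lock */
        atomic_store_explicit(&s->lock, 1, memory_order_relaxed);
    }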
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
parent 66509a27
arch/parisc/include/asm/barrier.h (new file):

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

/* The synchronize caches instruction executes as a nop on systems in
   which all memory references are performed in order. */
#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")

#if defined(CONFIG_SMP)
#define mb()       do { synchronize_caches(); } while (0)
#define rmb()      mb()
#define wmb()      mb()
#define dma_rmb()  mb()
#define dma_wmb()  mb()
#else
#define mb()       barrier()
#define rmb()      barrier()
#define wmb()      barrier()
#define dma_rmb()  barrier()
#define dma_wmb()  barrier()
#endif

#define __smp_mb()  mb()
#define __smp_rmb() mb()
#define __smp_wmb() mb()

#include <asm-generic/barrier.h>

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
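
For context, an illustrative use of these macros (the pattern and variable
names are assumptions for this sketch, not code from the commit): on SMP
each barrier below now emits a sync, while UP builds collapse them to
compiler barriers.

    /* producer/consumer publish pattern, kernel-style C */
    int payload;
    volatile int ready;

    void publish(int v)
    {
        payload = v;    /* write the data... */
        wmb();          /* ...make it visible before the flag */
        ready = 1;
    }

    int consume(void)
    {
        while (!ready)
            ;           /* spin until the flag is set */
        rmb();          /* order the flag read before the data read */
        return payload;
    }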
arch/parisc/kernel/entry.S:

@@ -481,6 +481,8 @@
 	/* Release pa_tlb_lock lock without reloading lock address. */
 	.macro		tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	sync
 	or,COND(=)	%r0,\spc,%r0
 	stw		\spc,0(\tmp)
 #endif
...
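
My reading of the sequence above (an assumption, not stated in the commit):
or,COND(=) %r0,\spc,%r0 nullifies the immediately following instruction when
\spc is zero, so both the new sync and the releasing stw are skipped when the
lock was never taken. In C-like terms:

    /* Hypothetical equivalent; tlb_lock_addr is an invented name */
    if (spc != 0) {
        mb();                    /* the inserted "sync" */
        *tlb_lock_addr = spc;    /* the releasing "stw" */
    }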
arch/parisc/kernel/pacache.S:

@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local)
 	.macro	tlb_unlock	la,flags,tmp
 #ifdef CONFIG_SMP
 	ldi		1,\tmp
+	sync
 	stw		\tmp,0(\la)
 	mtsm		\flags
 #endif
...
arch/parisc/kernel/syscall.S:

@@ -633,6 +633,7 @@ cas_action:
 	sub,<>	%r28, %r25, %r0
 2:	stw,ma	%r24, 0(%r26)
 	/* Free lock */
+	sync
 	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
@@ -647,6 +648,7 @@ cas_action:
 3:
 	/* Error occurred on load or store */
 	/* Free lock */
+	sync
 	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
@@ -848,6 +850,7 @@ cas2_action:
 cas2_end:
 	/* Free lock */
+	sync
 	stw,ma	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
@@ -858,6 +861,7 @@ cas2_end:
 22:
 	/* Error occurred on load or store */
 	/* Free lock */
+	sync
 	stw	%r20, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28
...
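
To make the intent of these LWS changes concrete (a simplified sketch under
assumptions, not the actual cas_action code; all names are invented): the
compare-and-swap runs under a small ldcw-guarded lock, and the new sync makes
the swapped value visible before the lock word is freed, so another CPU that
then takes the lock cannot read a stale value.

    #include <stdatomic.h>
    #include <stdbool.h>

    extern atomic_int lws_lock;    /* 1 = free, 0 = held (ldcw-style) */

    static bool sketch_cas(int *addr, int old_val, int new_val)
    {
        bool ok;

        /* entry: stands in for the ldcw spin loop (acquire) */
        while (atomic_exchange_explicit(&lws_lock, 0,
                                        memory_order_acquire) == 0)
            ;

        ok = (*addr == old_val);    /* the compare ("sub,<>") */
        if (ok)
            *addr = new_val;        /* the swap ("stw ... 0(%r26)") */

        /* the inserted "sync": order the swap before freeing the lock */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&lws_lock, 1, memory_order_relaxed);
        return ok;
    }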