Commit 633b38c5 authored by Cherry Zhang

runtime/internal/atomic: add early nil check on ARM

If the address is nil, fault before taking the lock or calling into the kernel.

Change-Id: I013d78a5f9233c2a9197660025f679940655d384
Reviewed-on: https://go-review.googlesource.com/93636
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent 97124af9
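The Go-side pattern below is a fail-fast nil dereference: `_ = *addr` compiles to a load whose only purpose is to fault at a known program counter if addr is nil, so the crash yields an ordinary Go traceback instead of happening under the spinlock or inside the kernel's cmpxchg helper. A minimal standalone sketch of the same idiom (not part of this commit; load is a hypothetical stand-in):

package main

// load stands in for the runtime's lock-based 64-bit atomics:
// dereference addr up front so a nil pointer faults here, at a
// Go PC the runtime can unwind, rather than deeper in the call.
func load(addr *uint64) uint64 {
	_ = *addr // if nil, fault before doing anything else
	return *addr
}

func main() {
	var p *uint64
	_ = load(p) // panics: runtime error: invalid memory address or nil pointer dereference
}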
@@ -109,6 +109,7 @@ func Cas64(addr *uint64, old, new uint64) bool {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var ok bool
 	addrLock(addr).lock()
 	if *addr == old {
@@ -124,6 +125,7 @@ func Xadd64(addr *uint64, delta int64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr + uint64(delta)
@@ -137,6 +139,7 @@ func Xchg64(addr *uint64, v uint64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr
@@ -150,6 +153,7 @@ func Load64(addr *uint64) uint64 {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	var r uint64
 	addrLock(addr).lock()
 	r = *addr
@@ -162,6 +166,7 @@ func Store64(addr *uint64, v uint64) {
 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
 		*(*int)(nil) = 0 // crash on unaligned uint64
 	}
+	_ = *addr // if nil, fault before taking the lock
 	addrLock(addr).lock()
 	*addr = v
 	addrLock(addr).unlock()
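Each of the functions above has the same shape. For context, here is a simplified sketch of that lock-based fallback with the new early check, using a plain sync.Mutex in place of the runtime's per-address spinlock table (addrLock); this is an illustration, not the runtime's actual code:

package main

import (
	"sync"
	"unsafe"
)

// lock is a simplified stand-in for the runtime's addrLock spinlocks.
var lock sync.Mutex

// load64 mirrors the shape of Load64 above: alignment check, early
// nil dereference, then the locked read.
func load64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	lock.Lock()
	r := *addr
	lock.Unlock()
	return r
}

func main() {
	var x uint64 = 7
	println(load64(&x)) // 7; load64(nil) would fault before Lock
}

Placing the dereference before lock() means a nil caller faults while no runtime lock is held, presumably keeping the crash path clear of the very lock the atomics need.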
@@ -11,6 +11,9 @@ TEXT cas<>(SB),NOSPLIT,$0
 TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
 	MOVW	ptr+0(FP), R2
+	// trigger potential paging fault here,
+	// because we don't know how to traceback through __kuser_cmpxchg
+	MOVW	(R2), R0
 	MOVW	old+4(FP), R0
 loop:
 	MOVW	new+8(FP), R1
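The assembly change covers the second half of the commit message: on older ARM Linux kernels, Cas goes through the __kuser_cmpxchg helper that the kernel maps into every process, and a nil pointer would otherwise first fault inside that helper, where (as the added comment notes) the Go runtime does not know how to produce a traceback. The explicit MOVW (R2), R0 load forces any fault to occur at this instruction, inside a symbol the runtime can unwind, before control transfers to the kernel helper.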