Commit 37ea1826 authored by Zachary Amsden, committed by Brad Fitzpatrick

runtime: catch concurrent stacks more often

If two goroutines are racing on a map, one of them will exit
cleanly, clearing the hashWriting bit, and the other will
likely notice and panic.  If we use XOR instead of OR to
set the bit in the first place, even numbers of racers will
hopefully all see the bit cleared and panic simultaneously,
giving the full set of available stacks.  If a third racer
sneaks in, we are no worse than the current code, and
the generated code should be no more expensive.

In practice, this catches most racing goroutines even in
very tight races.  See the demonstration program posted
on https://github.com/golang/go/issues/26703 for an example.

Fixes #26703

Change-Id: Idad17841a3127c24bd0a659b754734f70e307434
Reviewed-on: https://go-review.googlesource.com/126936
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent 773e8946
...@@ -567,7 +567,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -567,7 +567,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
// Set hashWriting after calling alg.hash, since alg.hash may panic, // Set hashWriting after calling alg.hash, since alg.hash may panic,
// in which case we have not actually done a write. // in which case we have not actually done a write.
h.flags |= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
...@@ -679,7 +679,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { ...@@ -679,7 +679,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
// Set hashWriting after calling alg.hash, since alg.hash may panic, // Set hashWriting after calling alg.hash, since alg.hash may panic,
// in which case we have not actually done a write (delete). // in which case we have not actually done a write (delete).
h.flags |= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
...@@ -921,7 +921,7 @@ func mapclear(t *maptype, h *hmap) { ...@@ -921,7 +921,7 @@ func mapclear(t *maptype, h *hmap) {
throw("concurrent map writes") throw("concurrent map writes")
} }
h.flags |= hashWriting h.flags ^= hashWriting
h.flags &^= sameSizeGrow h.flags &^= sameSizeGrow
h.oldbuckets = nil h.oldbuckets = nil
......
...@@ -103,7 +103,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { ...@@ -103,7 +103,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
...@@ -189,7 +189,7 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer ...@@ -189,7 +189,7 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
...@@ -276,7 +276,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) { ...@@ -276,7 +276,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
......
...@@ -103,7 +103,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { ...@@ -103,7 +103,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
...@@ -189,7 +189,7 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer ...@@ -189,7 +189,7 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
...@@ -276,7 +276,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) { ...@@ -276,7 +276,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
......
...@@ -202,7 +202,7 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer { ...@@ -202,7 +202,7 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
...@@ -294,7 +294,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) { ...@@ -294,7 +294,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment