Commit 86a659ca authored by Dmitriy Vyukov's avatar Dmitriy Vyukov Committed by Russ Cox

runtime: fix data race during Itab hash update/lookup

The data race is on newly published Itab nodes, which are
both unsafely published and unsafely acquired. It can
break on IA-32/Intel64 due to compiler optimizations
(most likely not an issue as of now) and on ARM due to
hardware memory access reorderings.

R=rsc
CC=golang-dev
https://golang.org/cl/4673055
parent dcdaeebd
...@@ -318,6 +318,12 @@ TEXT runtime·casp(SB), 7, $0 ...@@ -318,6 +318,12 @@ TEXT runtime·casp(SB), 7, $0
MOVL $1, AX MOVL $1, AX
RET RET
// void runtime·atomicstorep(void* volatile* addr, void* v);
// Atomically store pointer v into *addr with a full memory barrier.
// XCHG with a memory operand asserts LOCK implicitly on x86 (per the
// Intel SDM), so this both publishes v and orders prior writes.
TEXT runtime·atomicstorep(SB), 7, $0
MOVL 4(SP), BX	// BX = addr (first argument)
MOVL 8(SP), AX	// AX = v (second argument)
XCHGL AX, 0(BX)	// atomic exchange; implicit LOCK makes this a full barrier
RET
// void jmpdefer(fn, sp); // void jmpdefer(fn, sp);
// called from deferreturn. // called from deferreturn.
// 1. pop the caller // 1. pop the caller
......
...@@ -10,3 +10,10 @@ runtime·atomicload(uint32 volatile* addr) ...@@ -10,3 +10,10 @@ runtime·atomicload(uint32 volatile* addr)
{ {
return *addr; return *addr;
} }
// Atomically load and return the pointer stored at *addr (386).
// An aligned pointer-sized load is a single atomic instruction on x86,
// so a plain read through a volatile pointer suffices; volatile keeps
// the compiler from caching or reordering the access.
// NOTE(review): textflag 7 presumably marks this NOSPLIT (no stack-split
// prologue) among other flags — confirm against this runtime's textflag values.
#pragma textflag 7
void*
runtime·atomicloadp(void* volatile* addr)
{
return *addr;
}
...@@ -364,6 +364,12 @@ TEXT runtime·casp(SB), 7, $0 ...@@ -364,6 +364,12 @@ TEXT runtime·casp(SB), 7, $0
MOVL $1, AX MOVL $1, AX
RET RET
// void runtime·atomicstorep(void* volatile* addr, void* v);
// Atomically store pointer v into *addr with a full memory barrier.
// XCHG with a memory operand asserts LOCK implicitly on x86 (per the
// Intel SDM), so this both publishes v and orders prior writes.
TEXT runtime·atomicstorep(SB), 7, $0
MOVQ 8(SP), BX	// BX = addr (first argument)
MOVQ 16(SP), AX	// AX = v (second argument)
XCHGQ AX, 0(BX)	// atomic exchange; implicit LOCK makes this a full barrier
RET
// void jmpdefer(fn, sp); // void jmpdefer(fn, sp);
// called from deferreturn. // called from deferreturn.
// 1. pop the caller // 1. pop the caller
......
...@@ -10,3 +10,10 @@ runtime·atomicload(uint32 volatile* addr) ...@@ -10,3 +10,10 @@ runtime·atomicload(uint32 volatile* addr)
{ {
return *addr; return *addr;
} }
// Atomically load and return the pointer stored at *addr (amd64).
// An aligned pointer-sized load is a single atomic instruction on x86,
// so a plain read through a volatile pointer suffices; volatile keeps
// the compiler from caching or reordering the access.
// NOTE(review): textflag 7 presumably marks this NOSPLIT (no stack-split
// prologue) among other flags — confirm against this runtime's textflag values.
#pragma textflag 7
void*
runtime·atomicloadp(void* volatile* addr)
{
return *addr;
}
...@@ -10,3 +10,23 @@ runtime·atomicload(uint32 volatile* addr) ...@@ -10,3 +10,23 @@ runtime·atomicload(uint32 volatile* addr)
{ {
return runtime·xadd(addr, 0); return runtime·xadd(addr, 0);
} }
// Atomically load and return the pointer stored at *addr (arm).
// Implemented as an atomic add of 0: xadd returns the updated value,
// which for delta 0 is just the current contents, read atomically.
// This mirrors the file's existing atomicload, which uses the same trick.
// Assumes pointers are 32 bits wide on this target (addr is cast to
// uint32 volatile*) — true for the arm port this file belongs to.
#pragma textflag 7
void*
runtime·atomicloadp(void* volatile* addr)
{
return (void*)runtime·xadd((uint32 volatile*)addr, 0);
}
// Atomically store pointer v into *addr (arm).
// ARM of this era has no single atomic pointer store with the needed
// ordering here, so loop: snapshot the current value, then try to
// compare-and-swap it for v. casp failing means another writer raced
// in between the read and the CAS; re-read and retry until we win.
#pragma textflag 7
void
runtime·atomicstorep(void* volatile* addr, void* v)
{
void *old;
for(;;) {
old = *addr;	// snapshot; may be stale by the time casp runs
if(runtime·casp(addr, old, v))
return;	// CAS succeeded: v is published
}
}
...@@ -81,7 +81,7 @@ itab(InterfaceType *inter, Type *type, int32 canfail) ...@@ -81,7 +81,7 @@ itab(InterfaceType *inter, Type *type, int32 canfail)
for(locked=0; locked<2; locked++) { for(locked=0; locked<2; locked++) {
if(locked) if(locked)
runtime·lock(&ifacelock); runtime·lock(&ifacelock);
for(m=hash[h]; m!=nil; m=m->link) { for(m=runtime·atomicloadp(&hash[h]); m!=nil; m=m->link) {
if(m->inter == inter && m->type == type) { if(m->inter == inter && m->type == type) {
if(m->bad) { if(m->bad) {
m = nil; m = nil;
...@@ -145,9 +145,10 @@ search: ...@@ -145,9 +145,10 @@ search:
} }
out: out:
if(!locked)
runtime·panicstring("invalid itab locking");
m->link = hash[h]; m->link = hash[h];
hash[h] = m; runtime·atomicstorep(&hash[h], m);
if(locked)
runtime·unlock(&ifacelock); runtime·unlock(&ifacelock);
if(m->bad) if(m->bad)
return nil; return nil;
......
...@@ -426,6 +426,8 @@ bool runtime·casp(void**, void*, void*); ...@@ -426,6 +426,8 @@ bool runtime·casp(void**, void*, void*);
// this one is actually 'addx', that is, add-and-fetch. // this one is actually 'addx', that is, add-and-fetch.
uint32 runtime·xadd(uint32 volatile*, int32); uint32 runtime·xadd(uint32 volatile*, int32);
uint32 runtime·atomicload(uint32 volatile*); uint32 runtime·atomicload(uint32 volatile*);
void* runtime·atomicloadp(void* volatile*);
void runtime·atomicstorep(void* volatile*, void*);
void runtime·jmpdefer(byte*, void*); void runtime·jmpdefer(byte*, void*);
void runtime·exit1(int32); void runtime·exit1(int32);
void runtime·ready(G*); void runtime·ready(G*);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment