Commit e03bce15 authored by Peter Collingbourne, committed by Russ Cox

cmd/cc, runtime: eliminate use of the unnamed substructure C extension

Eliminating use of this extension makes it easier to port the Go runtime
to other compilers. This CL also disables the extension in cc to prevent
accidental use.

LGTM=rsc, khr
R=rsc, aram, khr, dvyukov
CC=axwalk, golang-codereviews
https://golang.org/cl/106790044
parent 6cee4d3e
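
The conversion throughout this CL follows one mechanical pattern: a member that was embedded as an unnamed substructure becomes a named member, and call sites take its address explicitly. A rough compilable sketch of the pattern (editorial illustration, not part of this diff; names and types trimmed):

/* Before (Plan 9 cc extension, no longer accepted):
 *
 *     struct Callbacks {
 *         Lock;                  // unnamed substructure
 *         int32 n;
 *     };
 *     runtime·lock(&cbs);        // Lock reached through the outer struct
 *
 * After (plain, portable C): */

typedef struct Lock { int key; } Lock;

typedef struct Callbacks {
	Lock lock;                    /* named member replaces the unnamed substructure */
	int  n;
} Callbacks;

static Callbacks cbs;

static void with_callbacks(void (*lockfn)(Lock*), void (*unlockfn)(Lock*))
{
	lockfn(&cbs.lock);            /* was runtime·lock(&cbs) */
	cbs.n++;
	unlockfn(&cbs.lock);          /* was runtime·unlock(&cbs) */
}
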
......@@ -1481,12 +1481,9 @@ edecl(int c, Type *t, Sym *s)
{
Type *t1;
if(s == S) {
if(!typesu[t->etype])
diag(Z, "unnamed structure element must be struct/union");
if(c != CXXX)
diag(Z, "unnamed structure element cannot have class");
} else
if(s == S)
diag(Z, "unnamed structure elements not supported");
else
if(c != CXXX)
diag(Z, "structure element cannot have class: %s", s->name);
t1 = t;
......
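
To illustrate the cc change above with a hypothetical minimal case (editorial, not from the tree), a declaration that the extension used to accept now draws the new diagnostic:

typedef struct Lock { int key; } Lock;

struct Callbacks {
	Lock;          /* cc now rejects this: "unnamed structure elements not supported" */
	int n;
};
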
......@@ -11,7 +11,7 @@
typedef struct Callbacks Callbacks;
struct Callbacks {
Lock;
Lock lock;
WinCallbackContext* ctxt[cb_max];
int32 n;
};
......@@ -44,13 +44,13 @@ runtime·compilecallback(Eface fn, bool cleanstack)
argsize += sizeof(uintptr);
}
runtime·lock(&cbs);
runtime·lock(&cbs.lock);
if(runtime·cbctxts == nil)
runtime·cbctxts = &(cbs.ctxt[0]);
n = cbs.n;
for(i=0; i<n; i++) {
if(cbs.ctxt[i]->gobody == fn.data && cbs.ctxt[i]->cleanstack == cleanstack) {
runtime·unlock(&cbs);
runtime·unlock(&cbs.lock);
// runtime·callbackasm is just a series of CALL instructions
// (each is 5 bytes long), and we want callback to arrive at
// correspondent call instruction instead of start of
......@@ -70,7 +70,7 @@ runtime·compilecallback(Eface fn, bool cleanstack)
c->restorestack = 0;
cbs.ctxt[n] = c;
cbs.n++;
runtime·unlock(&cbs);
runtime·unlock(&cbs.lock);
// as before
return (byte*)runtime·callbackasm + n * 5;
......
......@@ -136,7 +136,7 @@ chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
mysg.releasetime = -1;
}
runtime·lock(c);
runtime·lock(&c->lock);
if(raceenabled)
runtime·racereadpc(c, pc, chansend);
if(c->closed)
......@@ -149,7 +149,7 @@ chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
if(sg != nil) {
if(raceenabled)
racesync(c, sg);
runtime·unlock(c);
runtime·unlock(&c->lock);
gp = sg->g;
gp->param = sg;
......@@ -162,7 +162,7 @@ chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
}
if(!block) {
runtime·unlock(c);
runtime·unlock(&c->lock);
return false;
}
......@@ -171,10 +171,10 @@ chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
mysg.selectdone = nil;
g->param = nil;
enqueue(&c->sendq, &mysg);
runtime·parkunlock(c, "chan send");
runtime·parkunlock(&c->lock, "chan send");
if(g->param == nil) {
runtime·lock(c);
runtime·lock(&c->lock);
if(!c->closed)
runtime·throw("chansend: spurious wakeup");
goto closed;
......@@ -191,16 +191,16 @@ asynch:
if(c->qcount >= c->dataqsiz) {
if(!block) {
runtime·unlock(c);
runtime·unlock(&c->lock);
return false;
}
mysg.g = g;
mysg.elem = nil;
mysg.selectdone = nil;
enqueue(&c->sendq, &mysg);
runtime·parkunlock(c, "chan send");
runtime·parkunlock(&c->lock, "chan send");
runtime·lock(c);
runtime·lock(&c->lock);
goto asynch;
}
......@@ -217,18 +217,18 @@ asynch:
sg = dequeue(&c->recvq);
if(sg != nil) {
gp = sg->g;
runtime·unlock(c);
runtime·unlock(&c->lock);
if(sg->releasetime)
sg->releasetime = runtime·cputicks();
runtime·ready(gp);
} else
runtime·unlock(c);
runtime·unlock(&c->lock);
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
return true;
closed:
runtime·unlock(c);
runtime·unlock(&c->lock);
runtime·panicstring("send on closed channel");
return false; // not reached
}
......@@ -262,7 +262,7 @@ chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
mysg.releasetime = -1;
}
runtime·lock(c);
runtime·lock(&c->lock);
if(c->dataqsiz > 0)
goto asynch;
......@@ -273,7 +273,7 @@ chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
if(sg != nil) {
if(raceenabled)
racesync(c, sg);
runtime·unlock(c);
runtime·unlock(&c->lock);
if(ep != nil)
c->elemtype->alg->copy(c->elemsize, ep, sg->elem);
......@@ -289,7 +289,7 @@ chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
}
if(!block) {
runtime·unlock(c);
runtime·unlock(&c->lock);
return false;
}
......@@ -298,10 +298,10 @@ chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
mysg.selectdone = nil;
g->param = nil;
enqueue(&c->recvq, &mysg);
runtime·parkunlock(c, "chan receive");
runtime·parkunlock(&c->lock, "chan receive");
if(g->param == nil) {
runtime·lock(c);
runtime·lock(&c->lock);
if(!c->closed)
runtime·throw("chanrecv: spurious wakeup");
goto closed;
......@@ -319,7 +319,7 @@ asynch:
goto closed;
if(!block) {
runtime·unlock(c);
runtime·unlock(&c->lock);
if(received != nil)
*received = false;
return false;
......@@ -328,9 +328,9 @@ asynch:
mysg.elem = nil;
mysg.selectdone = nil;
enqueue(&c->recvq, &mysg);
runtime·parkunlock(c, "chan receive");
runtime·parkunlock(&c->lock, "chan receive");
runtime·lock(c);
runtime·lock(&c->lock);
goto asynch;
}
......@@ -349,12 +349,12 @@ asynch:
sg = dequeue(&c->sendq);
if(sg != nil) {
gp = sg->g;
runtime·unlock(c);
runtime·unlock(&c->lock);
if(sg->releasetime)
sg->releasetime = runtime·cputicks();
runtime·ready(gp);
} else
runtime·unlock(c);
runtime·unlock(&c->lock);
if(received != nil)
*received = true;
......@@ -369,7 +369,7 @@ closed:
*received = false;
if(raceenabled)
runtime·raceacquire(c);
runtime·unlock(c);
runtime·unlock(&c->lock);
if(mysg.releasetime > 0)
runtime·blockevent(mysg.releasetime - t0, 2);
return true;
......@@ -617,7 +617,7 @@ sellock(Select *sel)
c0 = sel->lockorder[i];
if(c0 && c0 != c) {
c = sel->lockorder[i];
runtime·lock(c);
runtime·lock(&c->lock);
}
}
}
......@@ -645,7 +645,7 @@ selunlock(Select *sel)
c = sel->lockorder[i];
if(i>0 && sel->lockorder[i-1] == c)
continue; // will unlock it on the next iteration
runtime·unlock(c);
runtime·unlock(&c->lock);
}
}
......@@ -1067,9 +1067,9 @@ closechan(Hchan *c, void *pc)
if(c == nil)
runtime·panicstring("close of nil channel");
runtime·lock(c);
runtime·lock(&c->lock);
if(c->closed) {
runtime·unlock(c);
runtime·unlock(&c->lock);
runtime·panicstring("close of closed channel");
}
......@@ -1104,7 +1104,7 @@ closechan(Hchan *c, void *pc)
runtime·ready(gp);
}
runtime·unlock(c);
runtime·unlock(&c->lock);
}
func reflect·chanlen(c *Hchan) (len int) {
......
......@@ -38,7 +38,7 @@ struct Hchan
uintgo recvx; // receive index
WaitQ recvq; // list of recv waiters
WaitQ sendq; // list of send waiters
Lock;
Lock lock;
};
// Buffer follows Hchan immediately in memory.
......
......@@ -21,9 +21,6 @@ struct Ureg
uint32 pc; /* pc */
uint32 cs; /* old context */
uint32 flags; /* old flags */
union {
uint32 usp;
uint32 sp;
};
uint32 sp;
uint32 ss; /* old stack segment */
};
......@@ -515,7 +515,7 @@ dumproots(void)
if(sp->kind != KindSpecialFinalizer)
continue;
spf = (SpecialFinalizer*)sp;
p = (byte*)((s->start << PageShift) + spf->offset);
p = (byte*)((s->start << PageShift) + spf->special.offset);
dumpfinalizer(p, spf->fn, spf->fint, spf->ot);
}
}
......@@ -695,7 +695,7 @@ dumpmemprof(void)
if(sp->kind != KindSpecialProfile)
continue;
spp = (SpecialProfile*)sp;
p = (byte*)((s->start << PageShift) + spp->offset);
p = (byte*)((s->start << PageShift) + spp->special.offset);
dumpint(TagAllocSample);
dumpint((uintptr)p);
dumpint((uintptr)spp->b);
......
......@@ -42,7 +42,7 @@ itab(InterfaceType *inter, Type *type, int32 canfail)
}
// compiler has provided some good hash codes for us.
h = inter->hash;
h = inter->typ.hash;
h += 17 * type->hash;
// TODO(rsc): h += 23 * x->mhash ?
h %= nelem(hash);
......@@ -98,7 +98,7 @@ search:
throw:
// didn't find method
runtime·newTypeAssertionError(
nil, type->string, inter->string,
nil, type->string, inter->typ.string,
iname, &err);
if(locked)
runtime·unlock(&ifacelock);
......@@ -231,7 +231,7 @@ assertI2Tret(Type *t, Iface i, byte *ret)
}
if(tab->type != t) {
runtime·newTypeAssertionError(
tab->inter->string, tab->type->string, t->string,
tab->inter->typ.string, tab->type->string, t->string,
nil, &err);
runtime·panic(err);
}
......@@ -332,7 +332,7 @@ func assertI2E(inter *InterfaceType, i Iface) (ret Eface) {
if(tab == nil) {
// explicit conversions require non-nil interface value.
runtime·newTypeAssertionError(
nil, nil, inter->string,
nil, nil, inter->typ.string,
nil, &err);
runtime·panic(err);
}
......@@ -377,7 +377,7 @@ runtime·ifaceI2I(InterfaceType *inter, Iface i, Iface *ret)
if(tab == nil) {
// explicit conversions require non-nil interface value.
runtime·newTypeAssertionError(
nil, nil, inter->string,
nil, nil, inter->typ.string,
nil, &err);
runtime·panic(err);
}
......@@ -414,7 +414,7 @@ runtime·ifaceE2I(InterfaceType *inter, Eface e, Iface *ret)
if(t == nil) {
// explicit conversions require non-nil interface value.
runtime·newTypeAssertionError(
nil, nil, inter->string,
nil, nil, inter->typ.string,
nil, &err);
runtime·panic(err);
}
......@@ -462,7 +462,7 @@ func assertE2E(inter *InterfaceType, e Eface) (ret Eface) {
if(t == nil) {
// explicit conversions require non-nil interface value.
runtime·newTypeAssertionError(
nil, nil, inter->string,
nil, nil, inter->typ.string,
nil, &err);
runtime·panic(err);
}
......
......@@ -45,9 +45,9 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
g->m->mcache->local_nlookup++;
if (sizeof(void*) == 4 && g->m->mcache->local_nlookup >= (1<<30)) {
// purge cache stats to prevent overflow
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
runtime·purgecachedstats(g->m->mcache);
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
}
s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
......@@ -341,7 +341,7 @@ runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
static struct
{
Lock;
Lock lock;
byte* pos;
byte* end;
} persistent;
......@@ -370,19 +370,19 @@ runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
align = 8;
if(size >= PersistentAllocMaxBlock)
return runtime·SysAlloc(size, stat);
runtime·lock(&persistent);
runtime·lock(&persistent.lock);
persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
if(persistent.pos + size > persistent.end) {
persistent.pos = runtime·SysAlloc(PersistentAllocChunk, &mstats.other_sys);
if(persistent.pos == nil) {
runtime·unlock(&persistent);
runtime·unlock(&persistent.lock);
runtime·throw("runtime: cannot allocate memory");
}
persistent.end = persistent.pos + PersistentAllocChunk;
}
p = persistent.pos;
persistent.pos += size;
runtime·unlock(&persistent);
runtime·unlock(&persistent.lock);
if(stat != &mstats.other_sys) {
// reaccount the allocation against provided stat
runtime·xadd64(stat, size);
......
......@@ -370,7 +370,7 @@ struct Special
typedef struct SpecialFinalizer SpecialFinalizer;
struct SpecialFinalizer
{
Special;
Special special;
FuncVal* fn;
uintptr nret;
Type* fint;
......@@ -382,7 +382,7 @@ typedef struct Bucket Bucket; // from mprof.h
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
Special;
Special special;
Bucket* b;
};
......@@ -438,7 +438,7 @@ void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in
// Central list of free objects of a given size.
struct MCentral
{
Lock;
Lock lock;
int32 sizeclass;
MSpan nonempty; // list of spans with a free object
MSpan empty; // list of spans with no free objects (or cached in an MCache)
......@@ -454,7 +454,7 @@ bool runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, ML
// but all the other global data is here too.
struct MHeap
{
Lock;
Lock lock;
MSpan free[MaxMHeapList]; // free lists of given length
MSpan freelarge; // free lists length >= MaxMHeapList
MSpan busy[MaxMHeapList]; // busy lists of large objects of given length
......@@ -483,7 +483,7 @@ struct MHeap
// spaced CacheLineSize bytes apart, so that each MCentral.Lock
// gets its own cache line.
struct {
MCentral;
MCentral mcentral;
byte pad[CacheLineSize];
} central[NumSizeClasses];
......
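
A compilable sketch of the padded central array above (editorial; values and field names are illustrative, not the runtime's real definitions):

#include <stdint.h>

enum { CacheLineSize = 64, NumSizeClasses = 67 };   /* illustrative values */

typedef struct MCentral { int lock; int32_t sizeclass; } MCentral;

/* Each per-sizeclass MCentral sits a cache line apart so their locks
 * do not share a line. */
static struct {
	MCentral mcentral;            /* named member, as introduced by this CL */
	uint8_t  pad[CacheLineSize];  /* keeps entries on separate cache lines */
} central[NumSizeClasses];

/* Callers now spell out the member, e.g. &central[i].mcentral. */
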
......@@ -22,9 +22,9 @@ runtime·allocmcache(void)
MCache *c;
int32 i;
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
runtime·memclr((byte*)c, sizeof(*c));
for(i = 0; i < NumSizeClasses; i++)
c->alloc[i] = &emptymspan;
......@@ -45,10 +45,10 @@ freemcache(MCache *c)
runtime·MCache_ReleaseAll(c);
runtime·stackcache_clear(c);
runtime·gcworkbuffree(c->gcworkbuf);
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
runtime·purgecachedstats(c);
runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
}
static void
......@@ -85,7 +85,7 @@ runtime·MCache_Refill(MCache *c, int32 sizeclass)
s->incache = false;
// Get a new cached span from the central lists.
s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass]);
s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass].mcentral);
if(s == nil)
runtime·throw("out of memory");
if(s->freelist == nil) {
......@@ -106,7 +106,7 @@ runtime·MCache_ReleaseAll(MCache *c)
for(i=0; i<NumSizeClasses; i++) {
s = c->alloc[i];
if(s != &emptymspan) {
runtime·MCentral_UncacheSpan(&runtime·mheap.central[i], s);
runtime·MCentral_UncacheSpan(&runtime·mheap.central[i].mcentral, s);
c->alloc[i] = &emptymspan;
}
}
......
......@@ -37,14 +37,14 @@ runtime·MCentral_CacheSpan(MCentral *c)
int32 cap, n;
uint32 sg;
runtime·lock(c);
runtime·lock(&c->lock);
sg = runtime·mheap.sweepgen;
retry:
for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
runtime·unlock(c);
runtime·unlock(&c->lock);
runtime·MSpan_Sweep(s);
runtime·lock(c);
runtime·lock(&c->lock);
// the span could have been moved to heap, retry
goto retry;
}
......@@ -63,9 +63,9 @@ retry:
runtime·MSpanList_Remove(s);
// swept spans are at the end of the list
runtime·MSpanList_InsertBack(&c->empty, s);
runtime·unlock(c);
runtime·unlock(&c->lock);
runtime·MSpan_Sweep(s);
runtime·lock(c);
runtime·lock(&c->lock);
// the span could be moved to nonempty or heap, retry
goto retry;
}
......@@ -80,7 +80,7 @@ retry:
// Replenish central list if empty.
if(!MCentral_Grow(c)) {
runtime·unlock(c);
runtime·unlock(&c->lock);
return nil;
}
goto retry;
......@@ -95,7 +95,7 @@ havespan:
runtime·MSpanList_Remove(s);
runtime·MSpanList_InsertBack(&c->empty, s);
s->incache = true;
runtime·unlock(c);
runtime·unlock(&c->lock);
return s;
}
......@@ -105,7 +105,7 @@ runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
int32 cap, n;
runtime·lock(c);
runtime·lock(&c->lock);
s->incache = false;
......@@ -118,7 +118,7 @@ runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
runtime·MSpanList_Remove(s);
runtime·MSpanList_Insert(&c->nonempty, s);
}
runtime·unlock(c);
runtime·unlock(&c->lock);
}
// Free n objects from a span s back into the central free list c.
......@@ -130,7 +130,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
{
if(s->incache)
runtime·throw("freespan into cached span");
runtime·lock(c);
runtime·lock(&c->lock);
// Move to nonempty if necessary.
if(s->freelist == nil) {
......@@ -150,7 +150,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);
if(s->ref != 0) {
runtime·unlock(c);
runtime·unlock(&c->lock);
return false;
}
......@@ -158,7 +158,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
runtime·MSpanList_Remove(s);
s->needzero = 1;
s->freelist = nil;
runtime·unlock(c);
runtime·unlock(&c->lock);
runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
runtime·MHeap_Free(&runtime·mheap, s, 0);
return true;
......@@ -174,14 +174,14 @@ MCentral_Grow(MCentral *c)
byte *p;
MSpan *s;
runtime·unlock(c);
runtime·unlock(&c->lock);
npages = runtime·class_to_allocnpages[c->sizeclass];
size = runtime·class_to_size[c->sizeclass];
n = (npages << PageShift) / size;
s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0, 1);
if(s == nil) {
// TODO(rsc): Log out of memory
runtime·lock(c);
runtime·lock(&c->lock);
return false;
}
......@@ -198,7 +198,7 @@ MCentral_Grow(MCentral *c)
*tailp = nil;
runtime·markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
runtime·lock(c);
runtime·lock(&c->lock);
runtime·MSpanList_Insert(&c->nonempty, s);
return true;
}
......@@ -539,7 +539,7 @@ markroot(ParFor *desc, uint32 i)
// retain everything it points to.
spf = (SpecialFinalizer*)sp;
// A finalizer can be set for an inner byte of an object, find object beginning.
p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
scanblock(p, s->elemsize, nil);
scanblock((void*)&spf->fn, PtrSize, ScanConservatively);
}
......@@ -1043,7 +1043,7 @@ runtime·MSpan_Sweep(MSpan *s)
c->local_nsmallfree[cl] += nfree;
c->local_cachealloc -= nfree * size;
runtime·xadd64(&mstats.next_gc, -(uint64)(nfree * size * (runtime·gcpercent + 100)/100));
res = runtime·MCentral_FreeSpan(&runtime·mheap.central[cl], s, nfree, head.next, end);
res = runtime·MCentral_FreeSpan(&runtime·mheap.central[cl].mcentral, s, nfree, head.next, end);
// MCentral_FreeSpan updates sweepgen
}
return res;
......@@ -1308,10 +1308,10 @@ runtime·gc(int32 force)
return;
if(runtime·gcpercent == GcpercentUnknown) { // first time through
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
if(runtime·gcpercent == GcpercentUnknown)
runtime·gcpercent = runtime·readgogc();
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
}
if(runtime·gcpercent < 0)
return;
......@@ -1560,7 +1560,7 @@ runtime∕debug·readGCStats(Slice *pauses)
// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
p = (uint64*)pauses->array;
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
n = mstats.numgc;
if(n > nelem(mstats.pause_ns))
n = nelem(mstats.pause_ns);
......@@ -1575,7 +1575,7 @@ runtime∕debug·readGCStats(Slice *pauses)
p[n] = mstats.last_gc;
p[n+1] = mstats.numgc;
p[n+2] = mstats.pause_total_ns;
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
pauses->len = n+3;
}
......@@ -1583,14 +1583,14 @@ int32
runtime·setgcpercent(int32 in) {
int32 out;
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
if(runtime·gcpercent == GcpercentUnknown)
runtime·gcpercent = runtime·readgogc();
out = runtime·gcpercent;
if(in < 0)
in = -1;
runtime·gcpercent = in;
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
return out;
}
......@@ -1670,11 +1670,11 @@ runfinq(void)
} else if(((InterfaceType*)f->fint)->mhdr.len == 0) {
// convert to empty interface
ef = (Eface*)frame;
ef->type = f->ot;
ef->type = &f->ot->typ;
ef->data = f->arg;
} else {
// convert to interface with methods, via empty interface.
ef1.type = f->ot;
ef1.type = &f->ot->typ;
ef1.data = f->arg;
if(!runtime·ifaceE2I2((InterfaceType*)f->fint, ef1, (Iface*)frame))
runtime·throw("invalid type conversion in runfinq");
......
......@@ -70,7 +70,7 @@ runtime·MHeap_Init(MHeap *h)
runtime·MSpanList_Init(&h->freelarge);
runtime·MSpanList_Init(&h->busylarge);
for(i=0; i<nelem(h->central); i++)
runtime·MCentral_Init(&h->central[i], i);
runtime·MCentral_Init(&h->central[i].mcentral, i);
}
void
......@@ -106,9 +106,9 @@ retry:
runtime·MSpanList_Remove(s);
// swept spans are at the end of the list
runtime·MSpanList_InsertBack(list, s);
runtime·unlock(h);
runtime·unlock(&h->lock);
n += runtime·MSpan_Sweep(s);
runtime·lock(h);
runtime·lock(&h->lock);
if(n >= npages)
return n;
// the span could have been moved elsewhere
......@@ -153,7 +153,7 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
}
// Now sweep everything that is not yet swept.
runtime·unlock(h);
runtime·unlock(&h->lock);
for(;;) {
n = runtime·sweepone();
if(n == -1) // all spans are swept
......@@ -162,7 +162,7 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
if(reclaimed >= npage)
break;
}
runtime·lock(h);
runtime·lock(&h->lock);
}
// Allocate a new span of npage pages from the heap for GC'd memory
......@@ -174,7 +174,7 @@ mheap_alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large)
if(g != g->m->g0)
runtime·throw("mheap_alloc not on M stack");
runtime·lock(h);
runtime·lock(&h->lock);
// To prevent excessive heap growth, before allocating n pages
// we need to sweep and reclaim at least n pages.
......@@ -207,7 +207,7 @@ mheap_alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large)
runtime·MSpanList_InsertBack(&h->busylarge, s);
}
}
runtime·unlock(h);
runtime·unlock(&h->lock);
return s;
}
......@@ -259,7 +259,7 @@ runtime·MHeap_AllocStack(MHeap *h, uintptr npage)
if(g != g->m->g0)
runtime·throw("mheap_allocstack not on M stack");
runtime·lock(h);
runtime·lock(&h->lock);
s = MHeap_AllocSpanLocked(h, npage);
if(s != nil) {
s->state = MSpanStack;
......@@ -267,7 +267,7 @@ runtime·MHeap_AllocStack(MHeap *h, uintptr npage)
s->ref = 0;
mstats.stacks_inuse += s->npages<<PageShift;
}
runtime·unlock(h);
runtime·unlock(&h->lock);
return s;
}
......@@ -460,7 +460,7 @@ mheap_free(MHeap *h, MSpan *s, int32 acct)
{
if(g != g->m->g0)
runtime·throw("mheap_free not on M stack");
runtime·lock(h);
runtime·lock(&h->lock);
mstats.heap_alloc += g->m->mcache->local_cachealloc;
g->m->mcache->local_cachealloc = 0;
if(acct) {
......@@ -468,7 +468,7 @@ mheap_free(MHeap *h, MSpan *s, int32 acct)
mstats.heap_objects--;
}
MHeap_FreeSpanLocked(h, s);
runtime·unlock(h);
runtime·unlock(&h->lock);
}
static void
......@@ -504,10 +504,10 @@ runtime·MHeap_FreeStack(MHeap *h, MSpan *s)
if(g != g->m->g0)
runtime·throw("mheap_freestack not on M stack");
s->needzero = 1;
runtime·lock(h);
runtime·lock(&h->lock);
mstats.stacks_inuse -= s->npages<<PageShift;
MHeap_FreeSpanLocked(h, s);
runtime·unlock(h);
runtime·unlock(&h->lock);
}
static void
......@@ -626,9 +626,9 @@ scavenge(int32 k, uint64 now, uint64 limit)
static void
scavenge_m(G *gp)
{
runtime·lock(&runtime·mheap);
runtime·lock(&runtime·mheap.lock);
scavenge(g->m->scalararg[0], g->m->scalararg[1], g->m->scalararg[2]);
runtime·unlock(&runtime·mheap);
runtime·unlock(&runtime·mheap.lock);
runtime·gogo(&gp->sched);
}
......@@ -865,12 +865,12 @@ runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot
runtime·lock(&runtime·mheap.speciallock);
s = runtime·FixAlloc_Alloc(&runtime·mheap.specialfinalizeralloc);
runtime·unlock(&runtime·mheap.speciallock);
s->kind = KindSpecialFinalizer;
s->special.kind = KindSpecialFinalizer;
s->fn = f;
s->nret = nret;
s->fint = fint;
s->ot = ot;
if(addspecial(p, s))
if(addspecial(p, &s->special))
return true;
// There was an old finalizer
......@@ -903,9 +903,9 @@ runtime·setprofilebucket(void *p, Bucket *b)
runtime·lock(&runtime·mheap.speciallock);
s = runtime·FixAlloc_Alloc(&runtime·mheap.specialprofilealloc);
runtime·unlock(&runtime·mheap.speciallock);
s->kind = KindSpecialProfile;
s->special.kind = KindSpecialProfile;
s->b = b;
if(!addspecial(p, s))
if(!addspecial(p, &s->special))
runtime·throw("setprofilebucket: profile already set");
}
......
......@@ -92,20 +92,20 @@ MProf_GC(void)
Bucket *b;
for(b=mbuckets; b; b=b->allnext) {
b->allocs += b->prev_allocs;
b->frees += b->prev_frees;
b->alloc_bytes += b->prev_alloc_bytes;
b->free_bytes += b->prev_free_bytes;
b->prev_allocs = b->recent_allocs;
b->prev_frees = b->recent_frees;
b->prev_alloc_bytes = b->recent_alloc_bytes;
b->prev_free_bytes = b->recent_free_bytes;
b->recent_allocs = 0;
b->recent_frees = 0;
b->recent_alloc_bytes = 0;
b->recent_free_bytes = 0;
b->data.mp.allocs += b->data.mp.prev_allocs;
b->data.mp.frees += b->data.mp.prev_frees;
b->data.mp.alloc_bytes += b->data.mp.prev_alloc_bytes;
b->data.mp.free_bytes += b->data.mp.prev_free_bytes;
b->data.mp.prev_allocs = b->data.mp.recent_allocs;
b->data.mp.prev_frees = b->data.mp.recent_frees;
b->data.mp.prev_alloc_bytes = b->data.mp.recent_alloc_bytes;
b->data.mp.prev_free_bytes = b->data.mp.recent_free_bytes;
b->data.mp.recent_allocs = 0;
b->data.mp.recent_frees = 0;
b->data.mp.recent_alloc_bytes = 0;
b->data.mp.recent_free_bytes = 0;
}
}
......@@ -129,8 +129,8 @@ runtime·MProf_Malloc(void *p, uintptr size)
nstk = runtime·callers(1, stk, nelem(stk));
runtime·lock(&proflock);
b = stkbucket(MProf, size, stk, nstk, true);
b->recent_allocs++;
b->recent_alloc_bytes += size;
b->data.mp.recent_allocs++;
b->data.mp.recent_alloc_bytes += size;
runtime·unlock(&proflock);
// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
......@@ -160,8 +160,8 @@ runtime·mprofMalloc_m(void)
nstk = runtime·gcallers(g->m->curg, 1, stk, nelem(stk));
runtime·lock(&proflock);
b = stkbucket(MProf, size, stk, nstk, true);
b->recent_allocs++;
b->recent_alloc_bytes += size;
b->data.mp.recent_allocs++;
b->data.mp.recent_alloc_bytes += size;
runtime·unlock(&proflock);
// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
......@@ -177,11 +177,11 @@ runtime·MProf_Free(Bucket *b, uintptr size, bool freed)
{
runtime·lock(&proflock);
if(freed) {
b->recent_frees++;
b->recent_free_bytes += size;
b->data.mp.recent_frees++;
b->data.mp.recent_free_bytes += size;
} else {
b->prev_frees++;
b->prev_free_bytes += size;
b->data.mp.prev_frees++;
b->data.mp.prev_free_bytes += size;
}
runtime·unlock(&proflock);
}
......@@ -221,8 +221,8 @@ runtime·blockevent(int64 cycles, int32 skip)
nstk = runtime·callers(skip, stk, nelem(stk));
runtime·lock(&proflock);
b = stkbucket(BProf, 0, stk, nstk, true);
b->count++;
b->cycles += cycles;
b->data.bp.count++;
b->data.bp.cycles += cycles;
runtime·unlock(&proflock);
}
......@@ -242,10 +242,10 @@ record(Record *r, Bucket *b)
{
int32 i;
r->alloc_bytes = b->alloc_bytes;
r->free_bytes = b->free_bytes;
r->alloc_objects = b->allocs;
r->free_objects = b->frees;
r->alloc_bytes = b->data.mp.alloc_bytes;
r->free_bytes = b->data.mp.free_bytes;
r->alloc_objects = b->data.mp.allocs;
r->free_objects = b->data.mp.frees;
for(i=0; i<b->nstk && i<nelem(r->stk); i++)
r->stk[i] = b->stk[i];
for(; i<nelem(r->stk); i++)
......@@ -261,9 +261,9 @@ func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
n = 0;
clear = true;
for(b=mbuckets; b; b=b->allnext) {
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
if(include_inuse_zero || b->data.mp.alloc_bytes != b->data.mp.free_bytes)
n++;
if(b->allocs != 0 || b->frees != 0)
if(b->data.mp.allocs != 0 || b->data.mp.frees != 0)
clear = false;
}
if(clear) {
......@@ -275,7 +275,7 @@ func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
MProf_GC();
n = 0;
for(b=mbuckets; b; b=b->allnext)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
if(include_inuse_zero || b->data.mp.alloc_bytes != b->data.mp.free_bytes)
n++;
}
ok = false;
......@@ -283,7 +283,7 @@ func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
ok = true;
r = (Record*)p.array;
for(b=mbuckets; b; b=b->allnext)
if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
if(include_inuse_zero || b->data.mp.alloc_bytes != b->data.mp.free_bytes)
record(r++, b);
}
runtime·unlock(&proflock);
......@@ -296,7 +296,7 @@ runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, u
runtime·lock(&proflock);
for(b=mbuckets; b; b=b->allnext) {
callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
callback(b, b->nstk, b->stk, b->size, b->data.mp.allocs, b->data.mp.frees);
}
runtime·unlock(&proflock);
}
......@@ -323,8 +323,8 @@ func BlockProfile(p Slice) (n int, ok bool) {
ok = true;
r = (BRecord*)p.array;
for(b=bbuckets; b; b=b->allnext, r++) {
r->count = b->count;
r->cycles = b->cycles;
r->count = b->data.bp.count;
r->cycles = b->data.bp.cycles;
for(i=0; i<b->nstk && i<nelem(r->stk); i++)
r->stk[i] = b->stk[i];
for(; i<nelem(r->stk); i++)
......
......@@ -42,13 +42,13 @@ struct Bucket
uintptr recent_alloc_bytes;
uintptr recent_free_bytes;
};
} mp;
struct // typ == BProf
{
int64 count;
int64 cycles;
};
};
} bp;
} data;
uintptr hash; // hash of size + stk
uintptr size;
uintptr nstk;
......
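
A trimmed, compilable sketch of the renamed Bucket union above (editorial; most fields omitted). The union and its member structs are now named, so the counters are reached as b->data.mp.* and b->data.bp.* rather than directly:

#include <stdint.h>

typedef struct Bucket Bucket;
struct Bucket
{
	Bucket *allnext;
	union
	{
		struct              /* typ == MProf */
		{
			uintptr_t allocs;
			uintptr_t frees;
		} mp;
		struct              /* typ == BProf */
		{
			int64_t count;
			int64_t cycles;
		} bp;
	} data;
};

static uintptr_t live_allocs(Bucket *b)
{
	return b->data.mp.allocs - b->data.mp.frees;   /* explicit path after this CL */
}
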
......@@ -48,7 +48,7 @@ struct PollDesc
// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
// in a lock-free way by all operations.
Lock; // protects the following fields
Lock lock; // protects the following fields
uintptr fd;
bool closing;
uintptr seq; // protects from stale timers and ready notifications
......@@ -63,7 +63,7 @@ struct PollDesc
static struct
{
Lock;
Lock lock;
PollDesc* first;
// PollDesc objects must be type-stable,
// because we can get ready notification from epoll/kqueue
......@@ -95,7 +95,7 @@ func runtime_pollServerInit() {
func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
pd = allocPollDesc();
runtime·lock(pd);
runtime·lock(&pd->lock);
if(pd->wg != nil && pd->wg != READY)
runtime·throw("runtime_pollOpen: blocked write on free descriptor");
if(pd->rg != nil && pd->rg != READY)
......@@ -107,7 +107,7 @@ func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
pd->rd = 0;
pd->wg = nil;
pd->wd = 0;
runtime·unlock(pd);
runtime·unlock(&pd->lock);
errno = runtime·netpollopen(fd, pd);
}
......@@ -120,10 +120,10 @@ func runtime_pollClose(pd *PollDesc) {
if(pd->rg != nil && pd->rg != READY)
runtime·throw("runtime_pollClose: blocked read on closing descriptor");
runtime·netpollclose(pd->fd);
runtime·lock(&pollcache);
runtime·lock(&pollcache.lock);
pd->link = pollcache.first;
pollcache.first = pd;
runtime·unlock(&pollcache);
runtime·unlock(&pollcache.lock);
}
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
......@@ -164,9 +164,9 @@ func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
G *rg, *wg;
runtime·lock(pd);
runtime·lock(&pd->lock);
if(pd->closing) {
runtime·unlock(pd);
runtime·unlock(&pd->lock);
return;
}
pd->seq++; // invalidate current timers
......@@ -218,7 +218,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
rg = netpollunblock(pd, 'r', false);
if(pd->wd < 0)
wg = netpollunblock(pd, 'w', false);
runtime·unlock(pd);
runtime·unlock(&pd->lock);
if(rg)
runtime·ready(rg);
if(wg)
......@@ -228,7 +228,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
func runtime_pollUnblock(pd *PollDesc) {
G *rg, *wg;
runtime·lock(pd);
runtime·lock(&pd->lock);
if(pd->closing)
runtime·throw("runtime_pollUnblock: already closing");
pd->closing = true;
......@@ -244,7 +244,7 @@ func runtime_pollUnblock(pd *PollDesc) {
runtime·deltimer(&pd->wt);
pd->wt.fv = nil;
}
runtime·unlock(pd);
runtime·unlock(&pd->lock);
if(rg)
runtime·ready(rg);
if(wg)
......@@ -272,13 +272,13 @@ runtime·netpollclosing(PollDesc *pd)
void
runtime·netpolllock(PollDesc *pd)
{
runtime·lock(pd);
runtime·lock(&pd->lock);
}
void
runtime·netpollunlock(PollDesc *pd)
{
runtime·unlock(pd);
runtime·unlock(&pd->lock);
}
// make pd ready, newly runnable goroutines (if any) are enqueued into gpp list
......@@ -396,10 +396,10 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write)
// If it's stale, ignore the timer event.
seq = (uintptr)arg.type;
rg = wg = nil;
runtime·lock(pd);
runtime·lock(&pd->lock);
if(seq != pd->seq) {
// The descriptor was reused or timers were reset.
runtime·unlock(pd);
runtime·unlock(&pd->lock);
return;
}
if(read) {
......@@ -416,7 +416,7 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write)
runtime·atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
wg = netpollunblock(pd, 'w', false);
}
runtime·unlock(pd);
runtime·unlock(&pd->lock);
if(rg)
runtime·ready(rg);
if(wg)
......@@ -447,7 +447,7 @@ allocPollDesc(void)
PollDesc *pd;
uint32 i, n;
runtime·lock(&pollcache);
runtime·lock(&pollcache.lock);
if(pollcache.first == nil) {
n = PollBlockSize/sizeof(*pd);
if(n == 0)
......@@ -462,6 +462,6 @@ allocPollDesc(void)
}
pd = pollcache.first;
pollcache.first = pd->link;
runtime·unlock(&pollcache);
runtime·unlock(&pollcache.lock);
return pd;
}
......@@ -375,7 +375,7 @@ struct M
struct P
{
Lock;
Lock lock;
int32 id;
uint32 status; // one of Pidle/Prunning/...
......@@ -506,7 +506,7 @@ enum {
struct Timers
{
Lock;
Lock lock;
G *timerproc;
bool sleeping;
bool rescheduling;
......
......@@ -36,7 +36,7 @@ struct SemaWaiter
typedef struct SemaRoot SemaRoot;
struct SemaRoot
{
Lock;
Lock lock;
SemaWaiter* head;
SemaWaiter* tail;
// Number of waiters. Read w/o the lock.
......@@ -48,7 +48,7 @@ struct SemaRoot
struct semtable
{
SemaRoot;
SemaRoot root;
uint8 pad[CacheLineSize-sizeof(SemaRoot)];
};
#pragma dataflag NOPTR /* mark semtable as 'no pointers', hiding from garbage collector */
......@@ -57,7 +57,7 @@ static struct semtable semtable[SEMTABLESZ];
static SemaRoot*
semroot(uint32 *addr)
{
return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
return &semtable[((uintptr)addr >> 3) % SEMTABLESZ].root;
}
static void
......@@ -125,19 +125,19 @@ runtime·semacquire(uint32 volatile *addr, bool profile)
s.releasetime = -1;
}
for(;;) {
runtime·lock(root);
runtime·lock(&root->lock);
// Add ourselves to nwait to disable "easy case" in semrelease.
runtime·xadd(&root->nwait, 1);
// Check cansemacquire to avoid missed wakeup.
if(cansemacquire(addr)) {
runtime·xadd(&root->nwait, -1);
runtime·unlock(root);
runtime·unlock(&root->lock);
return;
}
// Any semrelease after the cansemacquire knows we're waiting
// (we set nwait above), so go to sleep.
semqueue(root, addr, &s);
runtime·parkunlock(root, "semacquire");
runtime·parkunlock(&root->lock, "semacquire");
if(cansemacquire(addr)) {
if(t0)
runtime·blockevent(s.releasetime - t0, 3);
......@@ -162,11 +162,11 @@ runtime·semrelease(uint32 volatile *addr)
return;
// Harder case: search for a waiter and wake it.
runtime·lock(root);
runtime·lock(&root->lock);
if(runtime·atomicload(&root->nwait) == 0) {
// The count is already consumed by another goroutine,
// so no need to wake up another goroutine.
runtime·unlock(root);
runtime·unlock(&root->lock);
return;
}
for(s = root->head; s; s = s->next) {
......@@ -176,7 +176,7 @@ runtime·semrelease(uint32 volatile *addr)
break;
}
}
runtime·unlock(root);
runtime·unlock(&root->lock);
if(s) {
if(s->releasetime)
s->releasetime = runtime·cputicks();
......@@ -206,7 +206,7 @@ func runtime_Semrelease(addr *uint32) {
typedef struct SyncSema SyncSema;
struct SyncSema
{
Lock;
Lock lock;
SemaWaiter* head;
SemaWaiter* tail;
};
......@@ -233,7 +233,7 @@ func runtime_Syncsemacquire(s *SyncSema) {
w.releasetime = -1;
}
runtime·lock(s);
runtime·lock(&s->lock);
if(s->head && s->head->nrelease > 0) {
// have pending release, consume it
wake = nil;
......@@ -244,7 +244,7 @@ func runtime_Syncsemacquire(s *SyncSema) {
if(s->head == nil)
s->tail = nil;
}
runtime·unlock(s);
runtime·unlock(&s->lock);
if(wake)
runtime·ready(wake->g);
} else {
......@@ -254,7 +254,7 @@ func runtime_Syncsemacquire(s *SyncSema) {
else
s->tail->next = &w;
s->tail = &w;
runtime·parkunlock(s, "semacquire");
runtime·parkunlock(&s->lock, "semacquire");
if(t0)
runtime·blockevent(w.releasetime - t0, 2);
}
......@@ -269,7 +269,7 @@ func runtime_Syncsemrelease(s *SyncSema, n uint32) {
w.next = nil;
w.releasetime = 0;
runtime·lock(s);
runtime·lock(&s->lock);
while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
// have pending acquire, satisfy it
wake = s->head;
......@@ -288,7 +288,7 @@ func runtime_Syncsemrelease(s *SyncSema, n uint32) {
else
s->tail->next = &w;
s->tail = &w;
runtime·parkunlock(s, "semarelease");
runtime·parkunlock(&s->lock, "semarelease");
} else
runtime·unlock(s);
runtime·unlock(&s->lock);
}
......@@ -33,7 +33,7 @@ package runtime
#pragma textflag NOPTR
static struct {
Note;
Note note;
uint32 mask[(NSIG+31)/32];
uint32 wanted[(NSIG+31)/32];
uint32 recv[(NSIG+31)/32];
......@@ -72,7 +72,7 @@ runtime·sigsend(int32 s)
new = HASSIGNAL;
if(runtime·cas(&sig.state, old, new)) {
if (old == HASWAITER)
runtime·notewakeup(&sig);
runtime·notewakeup(&sig.note);
break;
}
}
......@@ -108,8 +108,8 @@ func signal_recv() (m uint32) {
new = HASWAITER;
if(runtime·cas(&sig.state, old, new)) {
if (new == HASWAITER) {
runtime·notetsleepg(&sig, -1);
runtime·noteclear(&sig);
runtime·notetsleepg(&sig.note, -1);
runtime·noteclear(&sig.note);
}
break;
}
......@@ -139,7 +139,7 @@ func signal_enable(s uint32) {
// to use for initialization. It does not pass
// signal information in m.
sig.inuse = true; // enable reception of signals; cannot disable
runtime·noteclear(&sig);
runtime·noteclear(&sig.note);
return;
}
......
......@@ -92,9 +92,9 @@ runtime·tsleep(int64 ns, int8 *reason)
t.period = 0;
t.fv = &readyv;
t.arg.data = g;
runtime·lock(&timers);
runtime·lock(&timers.lock);
addtimer(&t);
runtime·parkunlock(&timers, reason);
runtime·parkunlock(&timers.lock, reason);
}
static FuncVal timerprocv = {timerproc};
......@@ -102,9 +102,9 @@ static FuncVal timerprocv = {timerproc};
void
runtime·addtimer(Timer *t)
{
runtime·lock(&timers);
runtime·lock(&timers.lock);
addtimer(t);
runtime·unlock(&timers);
runtime·unlock(&timers.lock);
}
// Add a timer to the heap and start or kick the timer proc
......@@ -165,14 +165,14 @@ runtime·deltimer(Timer *t)
i = t->i;
USED(i);
runtime·lock(&timers);
runtime·lock(&timers.lock);
// t may not be registered anymore and may have
// a bogus i (typically 0, if generated by Go).
// Verify it before proceeding.
i = t->i;
if(i < 0 || i >= timers.len || timers.t[i] != t) {
runtime·unlock(&timers);
runtime·unlock(&timers.lock);
return false;
}
......@@ -188,7 +188,7 @@ runtime·deltimer(Timer *t)
}
if(debug)
dumptimers("deltimer");
runtime·unlock(&timers);
runtime·unlock(&timers.lock);
return true;
}
......@@ -205,7 +205,7 @@ timerproc(void)
Eface arg;
for(;;) {
runtime·lock(&timers);
runtime·lock(&timers.lock);
timers.sleeping = false;
now = runtime·nanotime();
for(;;) {
......@@ -230,7 +230,7 @@ timerproc(void)
}
f = (void*)t->fv->fn;
arg = t->arg;
runtime·unlock(&timers);
runtime·unlock(&timers.lock);
if(raceenabled)
runtime·raceacquire(t);
f(now, arg);
......@@ -242,20 +242,20 @@ timerproc(void)
arg.data = nil;
USED(&arg);
runtime·lock(&timers);
runtime·lock(&timers.lock);
}
if(delta < 0) {
// No timers left - put goroutine to sleep.
timers.rescheduling = true;
g->isbackground = true;
runtime·parkunlock(&timers, "timer goroutine (idle)");
runtime·parkunlock(&timers.lock, "timer goroutine (idle)");
g->isbackground = false;
continue;
}
// At least one timer pending. Sleep until then.
timers.sleeping = true;
runtime·noteclear(&timers.waitnote);
runtime·unlock(&timers);
runtime·unlock(&timers.lock);
runtime·notetsleepg(&timers.waitnote, delta);
}
}
......
......@@ -66,14 +66,14 @@ struct IMethod
struct InterfaceType
{
Type;
Type typ;
Slice mhdr;
IMethod m[];
};
struct MapType
{
Type;
Type typ;
Type *key;
Type *elem;
Type *bucket; // internal type representing a hash bucket
......@@ -87,20 +87,20 @@ struct MapType
struct ChanType
{
Type;
Type typ;
Type *elem;
uintptr dir;
};
struct SliceType
{
Type;
Type typ;
Type *elem;
};
struct FuncType
{
Type;
Type typ;
bool dotdotdot;
Slice in;
Slice out;
......@@ -108,6 +108,6 @@ struct FuncType
struct PtrType
{
Type;
Type typ;
Type *elem;
};
......@@ -40,21 +40,8 @@ typedef signed char schar;
typedef struct Vlong Vlong;
struct Vlong
{
union
{
struct
{
ulong lo;
ulong hi;
};
struct
{
ushort lols;
ushort loms;
ushort hils;
ushort hims;
};
};
ulong lo;
ulong hi;
};
void runtime·abort(void);
......@@ -82,15 +69,15 @@ _subv(Vlong *r, Vlong a, Vlong b)
void
_d2v(Vlong *y, double d)
{
union { double d; struct Vlong; } x;
union { double d; Vlong vl; } x;
ulong xhi, xlo, ylo, yhi;
int sh;
x.d = d;
xhi = (x.hi & 0xfffff) | 0x100000;
xlo = x.lo;
sh = 1075 - ((x.hi >> 20) & 0x7ff);
xhi = (x.vl.hi & 0xfffff) | 0x100000;
xlo = x.vl.lo;
sh = 1075 - ((x.vl.hi >> 20) & 0x7ff);
ylo = 0;
yhi = 0;
......@@ -123,7 +110,7 @@ _d2v(Vlong *y, double d)
yhi = d; /* causes something awful */
}
}
if(x.hi & SIGN(32)) {
if(x.vl.hi & SIGN(32)) {
if(ylo != 0) {
ylo = -ylo;
yhi = ~yhi;
......
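
A minimal sketch of the union pun _d2v relies on after this CL, with the Vlong view reached through the named member x.vl (editorial; assumes 32-bit halves and a little-endian double layout, as in this ARM support code):

#include <stdint.h>

typedef struct Vlong { uint32_t lo; uint32_t hi; } Vlong;

static void double_halves(double d, uint32_t *lo, uint32_t *hi)
{
	union { double d; Vlong vl; } x;

	x.d = d;
	*lo = x.vl.lo;   /* low 32 bits of the IEEE-754 representation */
	*hi = x.vl.hi;   /* sign, exponent and top mantissa bits */
}
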