Commit 8ecb9a76 authored by Russ Cox

runtime: rename Lock to Mutex

Mutex is consistent with package sync, and when in the
unexported Go form it avoids having a conflict between
the type (now mutex) and the function (lock).

LGTM=iant
R=golang-codereviews, iant
CC=dvyukov, golang-codereviews, r
https://golang.org/cl/133140043
parent 299117ec
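Note on the rationale: a Go package cannot declare a type and a function with the same name, so once this code is converted to Go and the names are lowercased, a type named lock would collide with the runtime's lock function. A minimal sketch of the conflict and the fix (hypothetical declarations, not the runtime's actual code):

```go
package main

// With the old naming, lowercasing Lock would yield a type "lock" that
// collides with the lock function in the same package:
//
//	type lock struct{ key uintptr }
//	func lock(l *lock) {} // compile error: lock redeclared in this block
//
// Renaming the type to mutex keeps the two names distinct and matches
// package sync's Mutex.

type mutex struct{ key uintptr }

func lock(l *mutex)   {}
func unlock(l *mutex) {}

func main() {
	var m mutex // a zeroed mutex is unlocked; no constructor needed
	lock(&m)
	unlock(&m)
}
```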
@@ -7,8 +7,8 @@
 #include "../../cmd/ld/textflag.h"
 
 static struct {
-	Lock l;
-	byte pad[CacheLineSize-sizeof(Lock)];
+	Mutex l;
+	byte pad[CacheLineSize-sizeof(Mutex)];
 } locktab[57];
 
 #define LOCK(addr) (&locktab[((uintptr)(addr)>>3)%nelem(locktab)].l)
...
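The locktab above stripes one lock per cache line and hashes an address to a stripe, so emulated atomics on nearby words neither contend on a single lock nor false-share a line. A rough user-level sketch of the same idea, assuming a 64-byte cache line and an 8-byte sync.Mutex:

```go
package main

import (
	"fmt"
	"sync"
)

const cacheLineSize = 64 // assumption; the runtime's CacheLineSize is per-arch

// slot pads each mutex out to a full cache line so two hot stripes
// never share a line, mirroring the padded locktab above.
type slot struct {
	mu  sync.Mutex
	pad [cacheLineSize - 8]byte // 8 = assumed size of sync.Mutex
}

var locktab [57]slot

// lockFor mirrors the C macro LOCK(addr):
// &locktab[((uintptr)(addr)>>3)%nelem(locktab)].l
func lockFor(addr uintptr) *sync.Mutex {
	return &locktab[(addr>>3)%uintptr(len(locktab))].mu
}

func main() {
	mu := lockFor(0x1008) // any word address maps to exactly one stripe
	mu.Lock()
	fmt.Println("holding the stripe for address 0x1008")
	mu.Unlock()
}
```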
@@ -115,7 +115,7 @@ runtime·cgocall(void (*fn)(void*), void *arg)
 	g->m->ncgocall++;
 
 	/*
-	 * Lock g to m to ensure we stay on the same stack if we do a
+	 * Mutex g to m to ensure we stay on the same stack if we do a
	 * cgo callback. Add entry to defer stack in case of panic.
	 */
 	runtime·lockOSThread();
...
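The comment reworded here describes pinning a goroutine to its OS thread ("m") for the duration of a cgo call. The exported form of that operation is runtime.LockOSThread; a small usage sketch:

```go
package main

import (
	"fmt"
	"runtime"
)

// Some C libraries (GUI toolkits, OpenGL, thread-local state) require
// that all calls happen on one OS thread. LockOSThread pins the calling
// goroutine to its current thread until UnlockOSThread runs.
func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Everything here executes on a single OS thread, the property the
	// cgo callback path above depends on.
	fmt.Println("pinned to one OS thread")
}
```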
@@ -130,9 +130,9 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 		t0 = cputicks()
 	}
 
-	golock(&c.lock)
+	lock(&c.lock)
 	if c.closed != 0 {
-		gounlock(&c.lock)
+		unlock(&c.lock)
 		panic("send on closed channel")
 	}
@@ -142,7 +142,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 		if raceenabled {
 			racesync(c, sg)
 		}
-		gounlock(&c.lock)
+		unlock(&c.lock)
 
 		recvg := sg.g
 		recvg.param = unsafe.Pointer(sg)
@@ -162,7 +162,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 	}
 
 	if !block {
-		gounlock(&c.lock)
+		unlock(&c.lock)
 		return false
 	}
@@ -204,7 +204,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 	var t1 int64
 	for c.qcount >= c.dataqsiz {
 		if !block {
-			gounlock(&c.lock)
+			unlock(&c.lock)
 			return false
 		}
 		gp := getg()
@@ -223,9 +223,9 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 			t1 = int64(mysg.releasetime)
 		}
 		releaseSudog(mysg)
-		golock(&c.lock)
+		lock(&c.lock)
 		if c.closed != 0 {
-			gounlock(&c.lock)
+			unlock(&c.lock)
 			panic("send on closed channel")
 		}
 	}
@@ -246,13 +246,13 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 	sg := c.recvq.dequeue()
 	if sg != nil {
 		recvg := sg.g
-		gounlock(&c.lock)
+		unlock(&c.lock)
 		if sg.releasetime != 0 {
 			*(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks()
 		}
 		goready(recvg)
 	} else {
-		gounlock(&c.lock)
+		unlock(&c.lock)
 	}
 	if t1 > 0 {
 		blockevent(t1-t0, 2)
...
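The chansend hunks take c.lock before inspecting c.closed and re-check it after every reacquisition, which is what turns a send on a closed channel into a panic. The behavior is observable from ordinary user code:

```go
package main

import "fmt"

func main() {
	c := make(chan int, 1)
	close(c)

	// Receiving from a closed channel yields the zero value...
	v, ok := <-c
	fmt.Println(v, ok) // 0 false

	// ...but sending panics; this is the path guarded by
	// lock(&c.lock) / unlock(&c.lock) in chansend above.
	defer func() { fmt.Println("recovered:", recover()) }()
	c <- 1 // panic: send on closed channel
}
```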
@@ -26,7 +26,7 @@ struct Hchan
 	uintgo recvx;  // receive index
 	WaitQ  recvq;  // list of recv waiters
 	WaitQ  sendq;  // list of send waiters
-	Lock   lock;
+	Mutex  lock;
 };
 
 // Buffer follows Hchan immediately in memory.
...
@@ -102,7 +102,7 @@ struct Profile {
 	bool eod_sent;  // special end-of-data record sent; => flushing
 };
 
-static Lock lk;
+static Mutex lk;
 static Profile *prof;
 
 static void tick(uintptr*, int32);
...
@@ -19,12 +19,12 @@ var Fintto64 = fintto64
 var F64toint = f64toint
 
 func entersyscall()
-func golockedOSThread() bool
+func lockedOSThread() bool
 func stackguard() (sp, limit uintptr)
 
 var Entersyscall = entersyscall
 var Exitsyscall = exitsyscall
-var LockedOSThread = golockedOSThread
+var LockedOSThread = lockedOSThread
 var Stackguard = stackguard
 
 type LFNode struct {
...
@@ -13,7 +13,7 @@ const (
 )
 
 var (
-	ifaceLock lock // lock for accessing hash
+	ifaceLock mutex // lock for accessing hash
 	hash      [hashSize]*itab
 )
 
@@ -51,7 +51,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
 	var locked int
 	for locked = 0; locked < 2; locked++ {
 		if locked != 0 {
-			golock(&ifaceLock)
+			lock(&ifaceLock)
 		}
 		for m = (*itab)(atomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
 			if m.inter == inter && m._type == typ {
@@ -69,7 +69,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
 				}
 			}
 			if locked != 0 {
-				gounlock(&ifaceLock)
+				unlock(&ifaceLock)
 			}
 			return m
 		}
@@ -106,7 +106,7 @@ search:
 	// didn't find method
 	if !canfail {
 		if locked != 0 {
-			gounlock(&ifaceLock)
+			unlock(&ifaceLock)
 		}
 		panic(&TypeAssertionError{"", *typ._string, *inter.typ._string, *iname})
 	}
@@ -119,7 +119,7 @@ search:
 	}
 	m.link = hash[h]
 	atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
-	gounlock(&ifaceLock)
+	unlock(&ifaceLock)
 	if m.bad != 0 {
 		return nil
 	}
...
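getitab makes two passes over the itab hash: the first reads hash[h] with atomicloadp and takes no lock, the second retries under ifaceLock and publishes any new entry with atomicstorep so lock-free readers only ever see fully initialized nodes. A simplified sketch of that lock-free-read, locked-write pattern (illustrative types; atomic.Pointer needs Go 1.19+):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type entry struct {
	key  string
	val  int
	link *entry // chain pointer; never modified after publication
}

var (
	mu   sync.Mutex            // writers only, like ifaceLock
	head atomic.Pointer[entry] // readers load this without the lock
)

// lookup scans the chain lock-free first; on a miss it rescans under
// the lock, then publishes a new entry with an atomic store.
func lookup(key string, val int) *entry {
	for locked := 0; locked < 2; locked++ {
		if locked != 0 {
			mu.Lock()
		}
		for e := head.Load(); e != nil; e = e.link {
			if e.key == key {
				if locked != 0 {
					mu.Unlock()
				}
				return e
			}
		}
		if locked != 0 {
			break // miss while holding the lock: safe to insert
		}
	}
	e := &entry{key: key, val: val, link: head.Load()}
	head.Store(e) // publish; mu is still held
	mu.Unlock()
	return e
}

func main() {
	fmt.Println(lookup("io.Reader", 1).val) // 1: inserted
	fmt.Println(lookup("io.Reader", 2).val) // 1: found on the lock-free pass
}
```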
@@ -11,7 +11,7 @@ package runtime
 #include "../../cmd/ld/textflag.h"
 
 extern Itab* runtime·hash[1009];
-extern Lock runtime·ifaceLock;
+extern Mutex runtime·ifaceLock;
 
 // TODO: delete this when no longer used (ifaceE2I2 is all that's left)
 static Itab*
...
@@ -35,7 +35,7 @@ enum
 // Note that there can be spinning threads during all states - they do not
 // affect mutex's state.
 void
-runtime·lock(Lock *l)
+runtime·lock(Mutex *l)
 {
 	uint32 i, v, wait, spin;
 
@@ -89,7 +89,7 @@ runtime·lock(Lock *l)
 }
 
 void
-runtime·unlock(Lock *l)
+runtime·unlock(Mutex *l)
 {
 	uint32 v;
...
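lock_futex.c implements Mutex as a single word with unlocked/locked/sleeping states: spin briefly on CAS, then sleep in the kernel via futexsleep. A toy spin-then-yield flavor of the same shape (Gosched stands in for the futex wait; the real code parks in the kernel, not the scheduler):

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

const (
	mutexUnlocked = 0
	mutexLocked   = 1
)

type toyMutex struct{ key uint32 }

// lock spins a few times hoping the holder is about to release, then
// falls back to yielding. The futex version instead records a third
// "sleeping" state and calls futexsleep so unlock knows to wake it.
func (m *toyMutex) lock() {
	for i := 0; ; i++ {
		if atomic.CompareAndSwapUint32(&m.key, mutexUnlocked, mutexLocked) {
			return
		}
		if i < 30 {
			continue // active spin: cheap when critical sections are short
		}
		runtime.Gosched() // stand-in for futexsleep(&l->key, ...)
	}
}

func (m *toyMutex) unlock() {
	atomic.StoreUint32(&m.key, mutexUnlocked)
	// the futex version calls futexwakeup here if a waiter was sleeping
}

func main() {
	var m toyMutex // a zeroed mutex is unlocked, as the runtime.h comment says
	m.lock()
	fmt.Println("in critical section")
	m.unlock()
}
```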
@@ -34,7 +34,7 @@ enum
 };
 
 void
-runtime·lock(Lock *l)
+runtime·lock(Mutex *l)
 {
 	uintptr v;
 	uint32 i, spin;
 
@@ -90,7 +90,7 @@ unlocked:
 }
 
 void
-runtime·unlock(Lock *l)
+runtime·unlock(Mutex *l)
 {
 	uintptr v;
 	M *mp;
...
@@ -350,7 +350,7 @@ runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
 
 static struct
 {
-	Lock  lock;
+	Mutex lock;
 	byte* pos;
 	byte* end;
 } persistent;
...
@@ -422,11 +422,11 @@ func gogc(force int32) {
 		return
 	}
 	if gcpercent == gcpercentUnknown {
-		golock(&mheap_.lock)
+		lock(&mheap_.lock)
 		if gcpercent == gcpercentUnknown {
 			gcpercent = readgogc()
 		}
-		gounlock(&mheap_.lock)
+		unlock(&mheap_.lock)
 	}
 	if gcpercent < 0 {
 		return
...
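gogc reads gcpercent lazily with a double-checked pattern: test, take mheap_.lock, test again, then initialize, so concurrent callers initialize exactly once without paying for the lock on every call. The same shape in user code (the sentinel and the GOGC parsing are assumptions for the sketch; sync.Once is the idiomatic tool outside the runtime):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"sync"
)

const gcpercentUnknown = -2 // sentinel meaning "not read yet" (assumption)

var (
	heapLock  sync.Mutex
	gcpercent = gcpercentUnknown
)

// readGOGCPercent is a hypothetical stand-in for the runtime's readgogc:
// unset GOGC means 100, "off" means disabled (-1).
func readGOGCPercent() int {
	switch s := os.Getenv("GOGC"); s {
	case "":
		return 100
	case "off":
		return -1
	default:
		if n, err := strconv.Atoi(s); err == nil {
			return n
		}
		return 100
	}
}

func gcPercent() int {
	// Unlocked first read, as in the hunk above; the runtime tolerates
	// this benign race on an int. Re-checking under the lock is what
	// guarantees a single initialization.
	if gcpercent == gcpercentUnknown {
		heapLock.Lock()
		if gcpercent == gcpercentUnknown {
			gcpercent = readGOGCPercent()
		}
		heapLock.Unlock()
	}
	return gcpercent
}

func main() { fmt.Println(gcPercent()) }
```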
@@ -242,7 +242,7 @@ struct MStats
 	uint64 nfree;  // number of frees
 
 	// Statistics about malloc heap.
-	// protected by mheap.Lock
+	// protected by mheap.lock
 	uint64 heap_alloc;	// bytes allocated and still in use
 	uint64 heap_sys;	// bytes obtained from system
 	uint64 heap_idle;	// bytes in idle spans
@@ -421,7 +421,7 @@ struct MSpan
 	int64   unusedsince;	// First time spotted by GC in MSpanFree state
 	uintptr npreleased;	// number of pages released to the OS
 	byte    *limit;		// end of data in span
-	Lock    specialLock;	// guards specials list
+	Mutex   specialLock;	// guards specials list
 	Special *specials;	// linked list of special records sorted by offset.
 };
@@ -442,7 +442,7 @@ void	runtime·MSpanList_Remove(MSpan *span);	// from whatever list it is in
 // Central list of free objects of a given size.
 struct MCentral
 {
-	Lock  lock;
+	Mutex lock;
 	int32 sizeclass;
 	MSpan nonempty;	// list of spans with a free object
 	MSpan empty;	// list of spans with no free objects (or cached in an MCache)
@@ -458,7 +458,7 @@ bool	runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, ML
 // but all the other global data is here too.
 struct MHeap
 {
-	Lock  lock;
+	Mutex lock;
 	MSpan free[MaxMHeapList];	// free lists of given length
 	MSpan freelarge;		// free lists length >= MaxMHeapList
 	MSpan busy[MaxMHeapList];	// busy lists of large objects of given length
@@ -484,7 +484,7 @@ struct MHeap
 	// central free lists for small size classes.
 	// the padding makes sure that the MCentrals are
-	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
+	// spaced CacheLineSize bytes apart, so that each MCentral.lock
 	// gets its own cache line.
 	struct {
 		MCentral mcentral;
@@ -495,7 +495,7 @@ struct MHeap
 	FixAlloc cachealloc;	// allocator for MCache*
 	FixAlloc specialfinalizeralloc;	// allocator for SpecialFinalizer*
 	FixAlloc specialprofilealloc;	// allocator for SpecialProfile*
-	Lock speciallock;	// lock for sepcial record allocators.
+	Mutex speciallock;	// lock for sepcial record allocators.
 
 	// Malloc stats.
 	uint64 largefree;	// bytes freed for large objects (>MaxSmallSize)
...
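The MHeap comment spells out why the mcentral array is padded: each MCentral.lock must land on its own cache line, or unrelated size classes would false-share. The same trick in Go, with the pad width computed from the struct size (the line size and the 67 size classes are assumptions matching this era of the allocator):

```go
package main

import (
	"fmt"
	"sync"
	"unsafe"
)

const cacheLineSize = 64 // assumed; the runtime's CacheLineSize is per-arch

// central stands in for MCentral: a lock plus per-size-class state.
type central struct {
	lock      sync.Mutex
	sizeclass int32
}

// paddedCentral rounds the embedded central up to a full cache line,
// so locks of adjacent size classes never share a line.
type paddedCentral struct {
	c   central
	pad [cacheLineSize - unsafe.Sizeof(central{})%cacheLineSize]byte
}

var centrals [67]paddedCentral

func main() {
	fmt.Println(unsafe.Sizeof(paddedCentral{})) // 64: one line per central
	centrals[3].c.lock.Lock()
	centrals[3].c.sizeclass = 3
	centrals[3].c.lock.Unlock()
}
```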
@@ -10,7 +10,7 @@
 extern byte runtime·end[];
 static byte *bloc = { runtime·end };
-static Lock memlock;
+static Mutex memlock;
 
 enum
 {
...
@@ -166,7 +166,7 @@ extern byte runtime·ebss[];
 extern byte runtime·gcdata[];
 extern byte runtime·gcbss[];
 
-static Lock finlock;	// protects the following variables
+static Mutex finlock;	// protects the following variables
 static FinBlock *finq;	// list of finalizers that are to be executed
 static FinBlock *finc;	// cache of free blocks
 static FinBlock *allfin;	// list of all blocks
@@ -175,7 +175,7 @@ bool runtime·fingwake;
 BitVector runtime·gcdatamask;
 BitVector runtime·gcbssmask;
 
-static Lock gclock;
+static Mutex gclock;
 
 static void runfinq(void);
 static void bgsweep(void);
@@ -1892,7 +1892,7 @@ runtime·unrollgcproginplace_m(void)
 void
 runtime·unrollgcprog_m(void)
 {
-	static Lock lock;
+	static Mutex lock;
 	Type *typ;
 	byte *mask, *prog;
 	uintptr pos;
...
@@ -12,7 +12,7 @@ import (
 // Patterned after tcmalloc's algorithms; shorter code.
 
 // NOTE(rsc): Everything here could use cas if contention became an issue.
-var proflock lock
+var proflock mutex
 
 // All memory allocations are local and do not escape outside of the profiler.
 // The profiler is forbidden from referring to garbage-collected memory.
@@ -35,7 +35,7 @@ var (
 // the testing package's -test.memprofile flag instead
 // of calling MemProfile directly.
 func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
-	golock(&proflock)
+	lock(&proflock)
 	clear := true
 	for b := mbuckets; b != nil; b = b.allnext {
 		if inuseZero || b.data.mp.alloc_bytes != b.data.mp.free_bytes {
@@ -69,7 +69,7 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
 			}
 		}
 	}
-	gounlock(&proflock)
+	unlock(&proflock)
 	return
 }
 
@@ -114,7 +114,7 @@ func record(r *MemProfileRecord, b *bucket) {
 // the testing package's -test.blockprofile flag instead
 // of calling BlockProfile directly.
 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
-	golock(&proflock)
+	lock(&proflock)
 	for b := bbuckets; b != nil; b = b.allnext {
 		n++
 	}
@@ -137,7 +137,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
 			idx++
 		}
 	}
-	gounlock(&proflock)
+	unlock(&proflock)
 	return
 }
...
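MemProfile's shape is worth noting: it counts matching buckets into n and copies into p only if everything fits, all under one acquisition of proflock so the count and the copy describe the same snapshot. The contract in miniature:

```go
package main

import (
	"fmt"
	"sync"
)

type record struct{ allocBytes, freeBytes int64 }

var (
	profLock sync.Mutex
	buckets  []record
)

// snapshot mirrors MemProfile's contract: report the number of matching
// records, and fill p only if it is large enough; one lock covers both
// so the pair (n, p) is consistent.
func snapshot(p []record, inuseZero bool) (n int, ok bool) {
	profLock.Lock()
	defer profLock.Unlock()
	for _, b := range buckets {
		if inuseZero || b.allocBytes != b.freeBytes {
			n++
		}
	}
	if n <= len(p) {
		ok = true
		i := 0
		for _, b := range buckets {
			if inuseZero || b.allocBytes != b.freeBytes {
				p[i] = b
				i++
			}
		}
	}
	return
}

func main() {
	buckets = []record{{10, 10}, {20, 5}}
	n, ok := snapshot(nil, false)
	fmt.Println(n, ok) // 1 false: caller retries with a big enough slice
	p := make([]record, n)
	n, ok = snapshot(p, false)
	fmt.Println(n, ok, p) // 1 true [{20 5}]
}
```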
@@ -14,7 +14,7 @@ package runtime
 #include "type.h"
 
 // NOTE(rsc): Everything here could use cas if contention became an issue.
-extern Lock runtime·proflock;
+extern Mutex runtime·proflock;
 
 // All memory allocations are local and do not escape outside of the profiler.
 // The profiler is forbidden from referring to garbage-collected memory.
@@ -296,7 +296,7 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
 // Tracing of alloc/free/gc.
 
-static Lock tracelock;
+static Mutex tracelock;
 
 void
 runtime·tracealloc(void *p, uintptr size, Type *type)
...
@@ -56,14 +56,14 @@ void	runtime·deltimer(Timer*);
 struct PollDesc
 {
-	PollDesc* link;	// in pollcache, protected by pollcache.Lock
+	PollDesc* link;	// in pollcache, protected by pollcache.lock
 
 	// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
 	// This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
 	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO rediness notification)
 	// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
 	// in a lock-free way by all operations.
-	Lock	lock;	// protectes the following fields
+	Mutex	lock;	// protects the following fields
 	uintptr	fd;
 	bool	closing;
 	uintptr	seq;	// protects from stale timers and ready notifications
@@ -78,7 +78,7 @@ struct PollDesc
 
 static struct
 {
-	Lock		lock;
+	Mutex		lock;
 	PollDesc*	first;
 	// PollDesc objects must be type-stable,
 	// because we can get ready notification from epoll/kqueue
...
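The pollcache comment explains why PollDescs come from a mutex-guarded free list and are never truly freed: epoll/kqueue can deliver a notification after the descriptor is recycled, and type-stability keeps such a stale pointer pointing at *a* PollDesc, with the seq field catching the staleness. A sketch of that allocate-only free list (in garbage-collected user code this is only an illustration of the C-era reasoning):

```go
package main

import (
	"fmt"
	"sync"
)

type pollDesc struct {
	link *pollDesc // in cache, protected by cache.lock
	fd   uintptr
	seq  uintptr // bumped on reuse so stale notifications are ignored
}

var cache struct {
	lock  sync.Mutex
	first *pollDesc
}

func alloc(fd uintptr) *pollDesc {
	cache.lock.Lock()
	defer cache.lock.Unlock()
	pd := cache.first
	if pd == nil {
		pd = new(pollDesc)
	} else {
		cache.first = pd.link
	}
	pd.fd = fd
	pd.seq++ // invalidate anyone still holding the old incarnation
	return pd
}

func free(pd *pollDesc) {
	cache.lock.Lock()
	pd.link = cache.first
	cache.first = pd
	cache.lock.Unlock()
}

func main() {
	a := alloc(3)
	free(a)
	b := alloc(4)
	fmt.Println(a == b, b.seq) // true 2: same object, new sequence number
}
```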
@@ -485,7 +485,7 @@ runtime·profileloop1(void)
 void
 runtime·resetcpuprofiler(int32 hz)
 {
-	static Lock lock;
+	static Mutex lock;
 	void *timer, *thread;
 	int32 ms;
 	int64 due;
...
@@ -11,7 +11,7 @@
 // Code related to defer, panic and recover.
 
 uint32 runtime·panicking;
-static Lock paniclk;
+static Mutex paniclk;
 
 // Each P holds pool for defers with arg sizes 8, 24, 40, 56 and 72 bytes.
 // Memory block is 40 (24 for 32 bits) bytes larger due to Defer header.
@@ -448,7 +448,7 @@ runtime·dopanic(int32 unused)
 		// Let it print what it needs to print.
 		// Wait forever without chewing up cpu.
 		// It will exit when it's done.
-		static Lock deadlock;
+		static Mutex deadlock;
 		runtime·lock(&deadlock);
 		runtime·lock(&deadlock);
 	}
...
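The dopanic hunk is a deliberately crude parking mechanism: locking a fresh Mutex twice blocks the crashing thread forever without spinning, while another thread finishes printing and exits the process. The same non-reentrancy is visible with sync.Mutex (this program is expected to die; that is the point):

```go
package main

import "sync"

// sync.Mutex is not reentrant: locking a mutex you already hold blocks
// forever. runtime·dopanic exploits exactly this to wait "forever
// without chewing up cpu" while another thread prints the panic.
func main() {
	var deadlock sync.Mutex
	deadlock.Lock()
	deadlock.Lock() // fatal error: all goroutines are asleep - deadlock!
}
```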
@@ -6,7 +6,7 @@
 #include "type.h"
 #include "../../cmd/ld/textflag.h"
 
-//static Lock debuglock;
+//static Mutex debuglock;
 
 static void vprintf(int8*, byte*);
...
@@ -26,7 +26,7 @@
 typedef struct Sched Sched;
 struct Sched {
-	Lock	lock;
+	Mutex	lock;
 
 	uint64	goidgen;
 
@@ -46,7 +46,7 @@ struct Sched {
 	int32	runqsize;
 
 	// Global cache of dead G's.
-	Lock	gflock;
+	Mutex	gflock;
 	G*	gfree;
 	int32	ngfree;
 
@@ -84,7 +84,7 @@ int8*	runtime·goos;
 int32	runtime·ncpu;
 static int32	newprocs;
 
-static	Lock allglock;	// the following vars are protected by this lock or by stoptheworld
+static	Mutex allglock;	// the following vars are protected by this lock or by stoptheworld
 G**	runtime·allg;
 uintptr runtime·allglen;
 static	uintptr allgcap;
@@ -133,7 +133,7 @@ static void allgadd(G*);
 static void forcegchelper(void);
 static struct
 {
-	Lock	lock;
+	Mutex	lock;
 	G*	g;
 	FuncVal	fv;
 	uint32	idle;
@@ -1570,7 +1570,7 @@ runtime·parkunlock_c(G *gp, void *lock)
 // Puts the current goroutine into a waiting state and unlocks the lock.
 // The goroutine can be made runnable again by calling runtime·ready(gp).
 void
-runtime·parkunlock(Lock *lock, String reason)
+runtime·parkunlock(Mutex *lock, String reason)
 {
 	runtime·park(runtime·parkunlock_c, lock, reason);
 }
@@ -2399,7 +2399,7 @@ runtime·badreflectcall(void) // called from assembly
 }
 
 static struct {
-	Lock lock;
+	Mutex lock;
 	void (*fn)(uintptr*, int32);
 	int32 hz;
 } prof;
...
@@ -59,7 +59,7 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
 
 // Puts the current goroutine into a waiting state and unlocks the lock.
 // The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *lock, reason string) {
+func goparkunlock(lock *mutex, reason string) {
 	gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason)
 }
...
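goparkunlock's doc comment carries the key guarantee: the goroutine is queued as waiting and *then* the lock is released, so a wakeup cannot slip in between. sync.Cond.Wait gives user code the same atomic unlock-and-sleep:

```go
package main

import (
	"fmt"
	"sync"
)

// Cond.Wait atomically releases c.L and suspends the goroutine, the
// user-level analogue of goparkunlock: if unlocking and sleeping were
// two separate steps, a Signal sent between them would be lost.
func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	ready := false

	go func() {
		mu.Lock()
		ready = true
		mu.Unlock()
		cond.Signal()
	}()

	mu.Lock()
	for !ready {
		cond.Wait() // unlocks mu, parks, relocks on wakeup
	}
	mu.Unlock()
	fmt.Println("woken")
}
```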
@@ -272,7 +272,7 @@ runtime·fastrand1(void)
 	return x;
 }
 
-static Lock ticksLock;
+static Mutex ticksLock;
 static int64 ticks;
 
 int64
...
@@ -57,7 +57,7 @@ typedef struct Func Func;
 typedef	struct	G	G;
 typedef	struct	Gobuf	Gobuf;
 typedef	struct	SudoG	SudoG;
-typedef	struct	Lock	Lock;
+typedef	struct	Mutex	Mutex;
 typedef	struct	M	M;
 typedef	struct	P	P;
 typedef	struct	Note	Note;
@@ -160,7 +160,7 @@ enum
 /*
  * structures
  */
-struct	Lock
+struct	Mutex
 {
 	// Futex-based impl treats it as uint32 key,
 	// while sema-based impl as M* waitm.
@@ -394,7 +394,7 @@ struct M
 struct P
 {
-	Lock	lock;
+	Mutex	lock;
 
 	int32	id;
 	uint32	status;	// one of Pidle/Prunning/...
@@ -915,7 +915,7 @@ void	runtime·gosched(void);
 void	runtime·gosched_m(G*);
 void	runtime·schedtrace(bool);
 void	runtime·park(bool(*)(G*, void*), void*, String);
-void	runtime·parkunlock(Lock*, String);
+void	runtime·parkunlock(Mutex*, String);
 void	runtime·tsleep(int64, String);
 M*	runtime·newm(void);
 void	runtime·goexit(void);
@@ -986,10 +986,10 @@ extern uint32 runtime·worldsema;
  * mutual exclusion locks. in the uncontended case,
  * as fast as spin locks (just a few user-level instructions),
  * but on the contention path they sleep in the kernel.
- * a zeroed Lock is unlocked (no need to initialize each lock).
+ * a zeroed Mutex is unlocked (no need to initialize each lock).
  */
-void	runtime·lock(Lock*);
-void	runtime·unlock(Lock*);
+void	runtime·lock(Mutex*);
+void	runtime·unlock(Mutex*);
 
 /*
  * sleep and wakeup on one-time events.
@@ -1030,7 +1030,7 @@ void	runtime·futexsleep(uint32*, uint32, int64);
 void	runtime·futexwakeup(uint32*, uint32);
 
 /*
- * Lock-free stack.
+ * Mutex-free stack.
  * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
...
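The runtime.h comment being renamed states the property that makes all these static Mutex declarations work: "a zeroed Mutex is unlocked (no need to initialize each lock)". Package sync keeps the same contract, which is why a Mutex is usable as a plain zero-valued struct field:

```go
package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.Mutex // zero value is an unlocked mutex: no init required
	n  int
}

func (c *counter) inc() {
	c.mu.Lock()
	c.n++
	c.mu.Unlock()
}

func main() {
	var c counter // no constructor; the zeroed mu is ready to use
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.inc() }()
	}
	wg.Wait()
	fmt.Println(c.n) // 100
}
```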
@@ -24,7 +24,7 @@ import "unsafe"
 
 // Asynchronous semaphore for sync.Mutex.
 type semaRoot struct {
-	lock  lock
+	lock  mutex
 	head  *sudog
 	tail  *sudog
 	nwait uint32 // Number of waiters. Read w/o the lock.
@@ -69,13 +69,13 @@ func semacquire(addr *uint32, profile bool) {
 		s.releasetime = -1
 	}
 	for {
-		golock(&root.lock)
+		lock(&root.lock)
 		// Add ourselves to nwait to disable "easy case" in semrelease.
 		xadd(&root.nwait, 1)
 		// Check cansemacquire to avoid missed wakeup.
 		if cansemacquire(addr) {
 			xadd(&root.nwait, -1)
-			gounlock(&root.lock)
+			unlock(&root.lock)
 			break
 		}
 		// Any semrelease after the cansemacquire knows we're waiting
@@ -104,11 +104,11 @@ func semrelease(addr *uint32) {
 	}
 
 	// Harder case: search for a waiter and wake it.
-	golock(&root.lock)
+	lock(&root.lock)
 	if atomicload(&root.nwait) == 0 {
 		// The count is already consumed by another goroutine,
 		// so no need to wake up another goroutine.
-		gounlock(&root.lock)
+		unlock(&root.lock)
 		return
 	}
 	s := root.head
@@ -119,7 +119,7 @@ func semrelease(addr *uint32) {
 			break
 		}
 	}
-	gounlock(&root.lock)
+	unlock(&root.lock)
 	if s != nil {
 		if s.releasetime != 0 {
 			s.releasetime = cputicks()
@@ -174,14 +174,14 @@ func (root *semaRoot) dequeue(s *sudog) {
 
 // Synchronous semaphore for sync.Cond.
 type syncSema struct {
-	lock lock
+	lock mutex
 	head *sudog
 	tail *sudog
 }
 
 // Syncsemacquire waits for a pairing syncsemrelease on the same semaphore s.
 func syncsemacquire(s *syncSema) {
-	golock(&s.lock)
+	lock(&s.lock)
 	if s.head != nil && s.head.nrelease > 0 {
 		// Have pending release, consume it.
 		var wake *sudog
@@ -193,7 +193,7 @@ func syncsemacquire(s *syncSema) {
 				s.tail = nil
 			}
 		}
-		gounlock(&s.lock)
+		unlock(&s.lock)
 		if wake != nil {
 			goready(wake.g)
 		}
@@ -225,7 +225,7 @@ func syncsemacquire(s *syncSema) {
 
 // Syncsemrelease waits for n pairing syncsemacquire on the same semaphore s.
 func syncsemrelease(s *syncSema, n uint32) {
-	golock(&s.lock)
+	lock(&s.lock)
 	for n > 0 && s.head != nil && s.head.nrelease < 0 {
 		// Have pending acquire, satisfy it.
 		wake := s.head
@@ -254,7 +254,7 @@ func syncsemrelease(s *syncSema, n uint32) {
 		s.tail = w
 		goparkunlock(&s.lock, "semarelease")
 	} else {
-		gounlock(&s.lock)
+		unlock(&s.lock)
 	}
 }
...
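semacquire's comments describe the missed-wakeup protocol: a would-be sleeper bumps nwait *before* re-checking the semaphore value under root.lock, so a releaser that decrements the value and still sees nwait == 0 can safely skip the wakeup path. A toy version of the handshake, with a channel standing in for the sudog queue and goparkunlock (a sketch of the ordering, not the runtime's implementation):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type sema struct {
	value uint32
	nwait uint32
	mu    sync.Mutex
	wait  chan struct{} // stands in for the sudog queue + goparkunlock
}

func (s *sema) cansemacquire() bool {
	for {
		v := atomic.LoadUint32(&s.value)
		if v == 0 {
			return false
		}
		if atomic.CompareAndSwapUint32(&s.value, v, v-1) {
			return true
		}
	}
}

func (s *sema) acquire() {
	if s.cansemacquire() {
		return // easy case
	}
	for {
		s.mu.Lock()
		atomic.AddUint32(&s.nwait, 1) // declare ourselves a waiter first...
		if s.cansemacquire() {        // ...then re-check to dodge a missed wakeup
			atomic.AddUint32(&s.nwait, ^uint32(0)) // -1
			s.mu.Unlock()
			return
		}
		s.mu.Unlock()
		<-s.wait // park until a releaser hands us a wakeup
		if s.cansemacquire() {
			return
		}
	}
}

func (s *sema) release() {
	atomic.AddUint32(&s.value, 1)
	if atomic.LoadUint32(&s.nwait) == 0 {
		return // nobody is waiting, and any future waiter will re-check value
	}
	s.mu.Lock()
	if atomic.LoadUint32(&s.nwait) > 0 {
		atomic.AddUint32(&s.nwait, ^uint32(0))
		s.mu.Unlock()
		s.wait <- struct{}{} // wake one committed waiter
		return
	}
	s.mu.Unlock()
}

func main() {
	s := &sema{wait: make(chan struct{})}
	done := make(chan bool)
	go func() { s.acquire(); done <- true }()
	s.release()
	fmt.Println(<-done) // true
}
```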
@@ -32,7 +32,7 @@ enum
 // order = log_2(size/FixedStack)
 // There is a free list for each order.
 static MSpan stackpool[NumStackOrders];
-static Lock stackpoolmu;
+static Mutex stackpoolmu;
 // TODO: one lock per order?
 
 void
...
@@ -132,9 +132,6 @@ var noequalcode uintptr
 // in panic.c
 func gothrow(s string)
 
-func golock(x *lock)
-func gounlock(x *lock)
-
 // Return the Go equivalent of the C Alg structure.
 // TODO: at some point Go will hold the truth for the layout
 // of runtime structures and C will be derived from it (if
@@ -201,6 +198,8 @@ func notetsleep(n *note, ns int64)
 func notewakeup(n *note)
 func notesleep(n *note)
 func noteclear(n *note)
+func lock(lk *mutex)
+func unlock(lk *mutex)
 
 //go:noescape
 func cas(ptr *uint32, old, new uint32) bool
...
@@ -23,15 +23,6 @@ package runtime
 // These invariants do not hold yet but will be established once we have
 // finished converting runtime support code from C to Go.
 
-#pragma textflag NOSPLIT
-func golock(p *Lock) {
-	runtime·lock(p);
-}
-
-#pragma textflag NOSPLIT
-func gounlock(p *Lock) {
-	runtime·unlock(p);
-}
-
 // entry point for testing
 // TODO: mcall and run on M stack
 func gostringW(str Slice) (s String) {
...
@@ -59,8 +59,8 @@ func compileCallback(fn eface, cleanstack bool) (code uintptr) {
 		argsize += uintptrSize
 	}
 
-	golock(&cbs.lock)
-	defer gounlock(&cbs.lock)
+	lock(&cbs.lock)
+	defer unlock(&cbs.lock)
 
 	n := cbs.n
 	for i := 0; i < n; i++ {
...
@@ -25,7 +25,7 @@ type timer struct {
 }
 
 var timers struct {
-	lock     lock
+	lock     mutex
 	gp       *g
 	created  bool
 	sleeping bool
@@ -52,7 +52,7 @@ func timeSleep(ns int64) {
 	t.when = nanotime() + ns
 	t.f = goroutineReady
 	t.arg = getg()
-	golock(&timers.lock)
+	lock(&timers.lock)
 	addtimerLocked(t)
 	goparkunlock(&timers.lock, "sleep")
 }
@@ -79,9 +79,9 @@ func goroutineReady(arg interface{}) {
 }
 
 func addtimer(t *timer) {
-	golock(&timers.lock)
+	lock(&timers.lock)
 	addtimerLocked(t)
-	gounlock(&timers.lock)
+	unlock(&timers.lock)
 }
 
 // Add a timer to the heap and start or kick the timer proc.
@@ -120,14 +120,14 @@ func deltimer(t *timer) bool {
 	// Discard result, because t might be moving in the heap.
 	_ = t.i
 
-	golock(&timers.lock)
+	lock(&timers.lock)
 	// t may not be registered anymore and may have
 	// a bogus i (typically 0, if generated by Go).
 	// Verify it before proceeding.
 	i := t.i
 	last := len(timers.t) - 1
 	if i < 0 || i > last || timers.t[i] != t {
-		gounlock(&timers.lock)
+		unlock(&timers.lock)
 		return false
 	}
 	if i != last {
@@ -140,7 +140,7 @@ func deltimer(t *timer) bool {
 		siftupTimer(i)
 		siftdownTimer(i)
 	}
-	gounlock(&timers.lock)
+	unlock(&timers.lock)
 	return true
 }
@@ -151,7 +151,7 @@ func timerproc() {
 	timers.gp = getg()
 	timers.gp.issystem = true
 	for {
-		golock(&timers.lock)
+		lock(&timers.lock)
 		timers.sleeping = false
 		now := nanotime()
 		delta := int64(-1)
@@ -185,12 +185,12 @@ func timerproc() {
 			}
 			f := t.f
 			arg := t.arg
-			gounlock(&timers.lock)
+			unlock(&timers.lock)
 			if raceenabled {
 				raceacquire(unsafe.Pointer(t))
 			}
 			f(arg)
-			golock(&timers.lock)
+			lock(&timers.lock)
 		}
 		if delta < 0 {
 			// No timers left - put goroutine to sleep.
@@ -201,7 +201,7 @@ func timerproc() {
 		// At least one timer pending. Sleep until then.
 		timers.sleeping = true
 		noteclear(&timers.waitnote)
-		gounlock(&timers.lock)
+		unlock(&timers.lock)
 		notetsleepg(&timers.waitnote, delta)
 	}
 }
...
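timerproc's discipline is the classic one for callback-running loops: unlock(&timers.lock) before f(arg) and lock again afterwards, so a slow callback cannot stall addtimer/deltimer, and a callback that re-arms a timer does not self-deadlock. The shape in miniature:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.Mutex
	queue []func()
)

func add(f func()) {
	mu.Lock()
	queue = append(queue, f)
	mu.Unlock()
}

// runAll pops callbacks one at a time, releasing mu around each call:
// a callback may itself call add, as a timer's f may call addtimer,
// without deadlocking. This mirrors timerproc above.
func runAll() {
	mu.Lock()
	for len(queue) > 0 {
		f := queue[0]
		queue = queue[1:]
		mu.Unlock() // never run user code while holding the lock
		f()
		mu.Lock()
	}
	mu.Unlock()
}

func main() {
	add(func() {
		fmt.Println("first")
		add(func() { fmt.Println("queued from inside a callback") })
	})
	runAll()
}
```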