Commit b7adc41f authored by Austin Clements

[dev.garbage] runtime: use s.base() everywhere it makes sense

Currently we have lots of (s.start << _PageShift) and variants. We now
have an s.base() function that returns this. It's faster and more
readable, so use it.

Change-Id: I888060a9dae15ea75ca8cc1c2b31c905e71b452b
Reviewed-on: https://go-review.googlesource.com/22559
Reviewed-by: Rick Hudson <rlh@golang.org>
Run-TryBot: Austin Clements <austin@google.com>
parent 2e8b74b6
...@@ -447,7 +447,7 @@ func dumproots() { ...@@ -447,7 +447,7 @@ func dumproots() {
continue continue
} }
spf := (*specialfinalizer)(unsafe.Pointer(sp)) spf := (*specialfinalizer)(unsafe.Pointer(sp))
p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset)) p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
dumpfinalizer(p, spf.fn, spf.fint, spf.ot) dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
} }
} }
...@@ -467,7 +467,7 @@ func dumpobjs() { ...@@ -467,7 +467,7 @@ func dumpobjs() {
if s.state != _MSpanInUse { if s.state != _MSpanInUse {
continue continue
} }
p := uintptr(s.start << _PageShift) p := s.base()
size := s.elemsize size := s.elemsize
n := (s.npages << _PageShift) / size n := (s.npages << _PageShift) / size
if n > uintptr(len(freemark)) { if n > uintptr(len(freemark)) {
...@@ -619,7 +619,7 @@ func dumpmemprof() { ...@@ -619,7 +619,7 @@ func dumpmemprof() {
continue continue
} }
spp := (*specialprofile)(unsafe.Pointer(sp)) spp := (*specialprofile)(unsafe.Pointer(sp))
p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset) p := s.base() + uintptr(spp.special.offset)
dumpint(tagAllocSample) dumpint(tagAllocSample)
dumpint(uint64(p)) dumpint(uint64(p))
dumpint(uint64(uintptr(unsafe.Pointer(spp.b)))) dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
......
...@@ -402,7 +402,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) { ...@@ -402,7 +402,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
if s == nil { if s == nil {
return return
} }
x = unsafe.Pointer(uintptr(s.start) << pageShift) x = unsafe.Pointer(s.base())
if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse { if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
s = nil s = nil
......
...@@ -1247,7 +1247,7 @@ func gcDumpObject(label string, obj, off uintptr) { ...@@ -1247,7 +1247,7 @@ func gcDumpObject(label string, obj, off uintptr) {
print(" s=nil\n") print(" s=nil\n")
return return
} }
print(" s.start*_PageSize=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n") print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
skipped := false skipped := false
for i := uintptr(0); i < s.elemsize; i += sys.PtrSize { for i := uintptr(0); i < s.elemsize; i += sys.PtrSize {
// For big objects, just print the beginning (because // For big objects, just print the beginning (because
......
...@@ -808,7 +808,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i ...@@ -808,7 +808,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
} }
case _MSpanInUse: case _MSpanInUse:
if s.allocCount != 0 || s.sweepgen != h.sweepgen { if s.allocCount != 0 || s.sweepgen != h.sweepgen {
print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
throw("MHeap_FreeSpanLocked - invalid free") throw("MHeap_FreeSpanLocked - invalid free")
} }
h.pagesInUse -= uint64(s.npages) h.pagesInUse -= uint64(s.npages)
...@@ -892,7 +892,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr { ...@@ -892,7 +892,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
var sumreleased uintptr var sumreleased uintptr
for s := list.first; s != nil; s = s.next { for s := list.first; s != nil; s = s.next {
if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages { if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
start := uintptr(s.start) << _PageShift start := s.base()
end := start + s.npages<<_PageShift end := start + s.npages<<_PageShift
if sys.PhysPageSize > _PageSize { if sys.PhysPageSize > _PageSize {
// We can only release pages in // We can only release pages in
...@@ -1062,7 +1062,7 @@ func addspecial(p unsafe.Pointer, s *special) bool { ...@@ -1062,7 +1062,7 @@ func addspecial(p unsafe.Pointer, s *special) bool {
mp := acquirem() mp := acquirem()
span.ensureSwept() span.ensureSwept()
offset := uintptr(p) - uintptr(span.start<<_PageShift) offset := uintptr(p) - span.base()
kind := s.kind kind := s.kind
lock(&span.speciallock) lock(&span.speciallock)
...@@ -1110,7 +1110,7 @@ func removespecial(p unsafe.Pointer, kind uint8) *special { ...@@ -1110,7 +1110,7 @@ func removespecial(p unsafe.Pointer, kind uint8) *special {
mp := acquirem() mp := acquirem()
span.ensureSwept() span.ensureSwept()
offset := uintptr(p) - uintptr(span.start<<_PageShift) offset := uintptr(p) - span.base()
lock(&span.speciallock) lock(&span.speciallock)
t := &span.specials t := &span.specials
......
...@@ -198,7 +198,7 @@ func stackpoolalloc(order uint8) gclinkptr { ...@@ -198,7 +198,7 @@ func stackpoolalloc(order uint8) gclinkptr {
throw("bad stackfreelist") throw("bad stackfreelist")
} }
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order { for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
x := gclinkptr(uintptr(s.start)<<_PageShift + i) x := gclinkptr(s.base() + i)
x.ptr().next = s.stackfreelist x.ptr().next = s.stackfreelist
s.stackfreelist = x s.stackfreelist = x
} }
...@@ -391,7 +391,7 @@ func stackalloc(n uint32) (stack, []stkbar) { ...@@ -391,7 +391,7 @@ func stackalloc(n uint32) (stack, []stkbar) {
throw("out of memory") throw("out of memory")
} }
} }
v = unsafe.Pointer(s.start << _PageShift) v = unsafe.Pointer(s.base())
} }
if raceenabled { if raceenabled {
...@@ -456,7 +456,7 @@ func stackfree(stk stack, n uintptr) { ...@@ -456,7 +456,7 @@ func stackfree(stk stack, n uintptr) {
} else { } else {
s := mheap_.lookup(v) s := mheap_.lookup(v)
if s.state != _MSpanStack { if s.state != _MSpanStack {
println(hex(s.start<<_PageShift), v) println(hex(s.base()), v)
throw("bad span state") throw("bad span state")
} }
if gcphase == _GCoff { if gcphase == _GCoff {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment