Commit 3ebe7d7d authored by Austin Clements

runtime: pull heap profile cycle into a type

Currently memRecord has the same set of four fields repeated three
times. Pull these into a type and use this type three times. This
cleans up and simplifies the code a bit and will make it easier to
switch to a globally tracked heap profile cycle for #19311.

Change-Id: I414d15673feaa406a8366b48784437c642997cf2
Reviewed-on: https://go-review.googlesource.com/37713
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 42aa608f
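
In short: the four counters that memRecord repeated under bare, prev_, and recent_ prefixes become a single memRecordCycle type, held once per accumulation stage. A minimal, self-contained sketch of the resulting shape (simplified here; the diff below shows the real change in the runtime's memory profiler, including the bucket plumbing this sketch omits):

```go
package main

import "fmt"

// memRecordCycle groups the four counters that the old memRecord
// repeated three times under different prefixes.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// memRecord now holds one cycle per accumulation stage.
type memRecord struct {
	active memRecordCycle // currently published profile
	prev   memRecordCycle // changes between next-to-last GC and last GC
	recent memRecordCycle // changes since last GC
}

func main() {
	var mp memRecord
	mp.recent.allocs++ // mallocs are accounted in recent
	mp.recent.alloc_bytes += 64
	mp.prev.frees++ // GC frees are accounted in prev
	mp.prev.free_bytes += 64
	fmt.Printf("%+v\n", mp)
}
```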
@@ -97,22 +97,30 @@ type memRecord struct {
 	// GC frees are accounted in prev stats.
 	// After GC prev stats are added to final stats and
 	// recent stats are moved into prev stats.
-	allocs      uintptr
-	frees       uintptr
-	alloc_bytes uintptr
-	free_bytes  uintptr
+
+	// active is the currently published profile. A profiling
+	// cycle can be accumulated into active once it's complete.
+	active memRecordCycle
 
 	// changes between next-to-last GC and last GC
-	prev_allocs      uintptr
-	prev_frees       uintptr
-	prev_alloc_bytes uintptr
-	prev_free_bytes  uintptr
+	prev memRecordCycle
 
 	// changes since last GC
-	recent_allocs      uintptr
-	recent_frees       uintptr
-	recent_alloc_bytes uintptr
-	recent_free_bytes  uintptr
+	recent memRecordCycle
+}
+
+// memRecordCycle
+type memRecordCycle struct {
+	allocs, frees           uintptr
+	alloc_bytes, free_bytes uintptr
 }
 
+// add accumulates b into a. It does not zero b.
+func (a *memRecordCycle) add(b *memRecordCycle) {
+	a.allocs += b.allocs
+	a.frees += b.frees
+	a.alloc_bytes += b.alloc_bytes
+	a.free_bytes += b.free_bytes
+}
+
 // A blockRecord is the bucket data for a bucket of type blockProfile,
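Reusing the sketch types above, a small hypothetical example of the new add method: it is plain accumulation, and it deliberately does not zero its argument, so clearing a cycle remains a separate, explicit step (the memRecordCycle{} assignment in mprof_GC below):

```go
a := memRecordCycle{allocs: 2, alloc_bytes: 128}
b := memRecordCycle{allocs: 1, frees: 1, alloc_bytes: 64, free_bytes: 64}
a.add(&b)
// a == {allocs:3 frees:1 alloc_bytes:192 free_bytes:64}
// b is unchanged and can be rotated or reused by the caller.
```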
@@ -243,20 +251,10 @@ func eqslice(x, y []uintptr) bool {
 func mprof_GC() {
 	for b := mbuckets; b != nil; b = b.allnext {
 		mp := b.mp()
-		mp.allocs += mp.prev_allocs
-		mp.frees += mp.prev_frees
-		mp.alloc_bytes += mp.prev_alloc_bytes
-		mp.free_bytes += mp.prev_free_bytes
-
-		mp.prev_allocs = mp.recent_allocs
-		mp.prev_frees = mp.recent_frees
-		mp.prev_alloc_bytes = mp.recent_alloc_bytes
-		mp.prev_free_bytes = mp.recent_free_bytes
-
-		mp.recent_allocs = 0
-		mp.recent_frees = 0
-		mp.recent_alloc_bytes = 0
-		mp.recent_free_bytes = 0
+		mp.active.add(&mp.prev)
+		mp.prev = mp.recent
+		mp.recent = memRecordCycle{}
 	}
 }
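The three-line rotation is where the new type pays off. A hypothetical trace with the sketch types from above shows how one batch of allocations takes two rotations to reach the published profile, which is exactly what keeps mallocs and frees consistent:

```go
var mp memRecord
mp.recent.allocs = 5 // mallocs land in recent as they happen

// First rotation (at GC): the 5 allocs move from recent to prev,
// where the frees discovered by this GC's sweep will join them.
mp.active.add(&mp.prev)
mp.prev = mp.recent
mp.recent = memRecordCycle{}

mp.prev.frees = 2 // sweep accounts GC frees into prev (mProf_Free)

// Second rotation: prev now pairs the allocs with their frees,
// so folding it into active publishes a consistent snapshot.
mp.active.add(&mp.prev)
mp.prev = mp.recent
mp.recent = memRecordCycle{}
// mp.active == {allocs:5 frees:2 ...} — no skew toward mallocs.
```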
@@ -274,8 +272,8 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
 	lock(&proflock)
 	b := stkbucket(memProfile, size, stk[:nstk], true)
 	mp := b.mp()
-	mp.recent_allocs++
-	mp.recent_alloc_bytes += size
+	mp.recent.allocs++
+	mp.recent.alloc_bytes += size
 	unlock(&proflock)
 
 	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
@@ -291,8 +289,8 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
 func mProf_Free(b *bucket, size uintptr) {
 	lock(&proflock)
 	mp := b.mp()
-	mp.prev_frees++
-	mp.prev_free_bytes += size
+	mp.prev.frees++
+	mp.prev.free_bytes += size
 	unlock(&proflock)
 }
@@ -472,10 +470,10 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
 	clear := true
 	for b := mbuckets; b != nil; b = b.allnext {
 		mp := b.mp()
-		if inuseZero || mp.alloc_bytes != mp.free_bytes {
+		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
 			n++
 		}
-		if mp.allocs != 0 || mp.frees != 0 {
+		if mp.active.allocs != 0 || mp.active.frees != 0 {
 			clear = false
 		}
 	}
@@ -489,7 +487,7 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
 		n = 0
 		for b := mbuckets; b != nil; b = b.allnext {
 			mp := b.mp()
-			if inuseZero || mp.alloc_bytes != mp.free_bytes {
+			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
 				n++
 			}
 		}
@@ -499,7 +497,7 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
 		idx := 0
 		for b := mbuckets; b != nil; b = b.allnext {
 			mp := b.mp()
-			if inuseZero || mp.alloc_bytes != mp.free_bytes {
+			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
 				record(&p[idx], b)
 				idx++
 			}
@@ -512,10 +510,10 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
 // Write b's data to r.
 func record(r *MemProfileRecord, b *bucket) {
 	mp := b.mp()
-	r.AllocBytes = int64(mp.alloc_bytes)
-	r.FreeBytes = int64(mp.free_bytes)
-	r.AllocObjects = int64(mp.allocs)
-	r.FreeObjects = int64(mp.frees)
+	r.AllocBytes = int64(mp.active.alloc_bytes)
+	r.FreeBytes = int64(mp.active.free_bytes)
+	r.AllocObjects = int64(mp.active.allocs)
+	r.FreeObjects = int64(mp.active.frees)
 	if raceenabled {
 		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&r)), funcPC(MemProfile))
 	}
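The public API is unaffected: MemProfile and record simply read the active cycle now. For context, a common grow-and-retry pattern for reading the profile from user code (this is standard usage, not part of this commit):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Size the slice with a first call, then retry with headroom
	// until everything fits (ok == true).
	n, _ := runtime.MemProfile(nil, true)
	var p []runtime.MemProfileRecord
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[:n]
			break
		}
	}
	for i := range p {
		r := &p[i]
		fmt.Println(r.InUseObjects(), "objects /", r.InUseBytes(), "bytes in use")
	}
}
```

The runtime documentation steers most clients to the runtime/pprof package rather than this raw interface.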
@@ -532,7 +530,7 @@ func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
 	lock(&proflock)
 	for b := mbuckets; b != nil; b = b.allnext {
 		mp := b.mp()
-		fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
+		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
 	}
 	unlock(&proflock)
 }