// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	spanidx uint32 // background sweeper position

	nbgsweep    uint32 // spans swept by the background sweeper
	npausesweep uint32 // spans swept during the GC pause (finishsweep_m)
}

//go:nowritebarrier
func finishsweep_m(stw bool) {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is done with the world stopped
	// this is not required because the STW must have waited for sweeps.
	//
	// TODO(austin): As of this writing, we always pass true for stw.
	// Consider removing this code.
	if !stw {
		sg := mheap_.sweepgen
		for _, s := range work.spans {
			if s.sweepgen != sg && s.state == _MSpanInUse {
				mSpan_EnsureSwept(s)
			}
		}
	}
}

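// bgsweep is the background sweeper goroutine. It is started once during
// runtime initialization (gcenable starts it and waits on c for the sweeper
// to set itself up); after that it alternates between sweeping spans,
// yielding with Gosched after each one, and parking on sweep.lock when
// there is nothing left to sweep.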
func bgsweep(c chan int) {
	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}

// sweepone sweeps one span and returns the number of pages returned to the
// heap, or ^uintptr(0) if there was nothing to sweep.
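// Spans are claimed by atomically advancing sweep.spanidx over work.spans,
// the span list cached for this sweep cycle, so no lock is needed.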
//go:nowritebarrier
func sweepone() uintptr {
	_g_ := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of a sweep, which would leave the span in an
	// inconsistent state for the next GC.
	_g_.m.locks++
	sg := mheap_.sweepgen
	for {
		idx := atomic.Xadd(&sweep.spanidx, 1) - 1
		if idx >= uint32(len(work.spans)) {
			mheap_.sweepdone = 1
			_g_.m.locks--
			return ^uintptr(0)
		}
		s := work.spans[idx]
		if s.state != mSpanInUse {
			s.sweepgen = sg
			continue
		}
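		// sweepgen protocol, relative to sg = mheap_.sweepgen (which is
		// advanced by 2 each GC cycle):
		//   s.sweepgen == sg-2: span needs sweeping
		//   s.sweepgen == sg-1: span is being swept by some sweeper
		//   s.sweepgen == sg:   span has been swept and is ready for use
		// The CAS below claims an unswept span for this sweeper.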
		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages := s.npages
		if !mSpan_Sweep(s, false) {
			npages = 0
		}
		_g_.m.locks--
		return npages
	}
}

//go:nowritebarrier
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

//go:nowritebarrier
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}

// Returns only when span s has been swept.
//go:nowritebarrier
func mSpan_EnsureSwept(s *mspan) {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomic.Load(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is an MSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		mSpan_Sweep(s, false)
		return
	}
	// Unfortunate condition: someone else is sweeping the span and we have
	// no efficient way to wait for it to finish, so spin until sweepgen advances.
	for atomic.Load(&s.sweepgen) != sg {
		osyield()
	}
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to the heap or relink it in MCentral lists;
// caller takes care of it.
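//
// Roughly, the steps below are: mark objects already on s's free list so
// they are not collected again, process special records (finalizers,
// memory-profile records) for objects about to be freed, walk the heap
// bitmap to collect unmarked objects, and finally either hand freed small
// objects back to mcentral or return a dead large span to the heap,
// updating s.sweepgen once the span is in a consistent state.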
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepStart()
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	cl := s.sizeclass
	size := s.elemsize
	res := false
	nfree := 0

	var head, end gclinkptr

	c := _g_.m.mcache
	freeToHeap := false

	// Mark any free objects in this span so we don't collect them.
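	// Free-list objects were never marked by the GC (nothing reachable
	// points to them), so without this they would look like garbage to the
	// bitmap sweep below and be threaded onto the free list a second time.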
	sstart := uintptr(s.start << _PageShift)
	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Free list is corrupted.
			dumpFreeList(s)
			throw("free list corrupted")
		}
		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
	}

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such a case we need to queue the finalizer for execution,
	//    mark the object as live, and preserve the profile special.
	// 2. A tiny object can have several finalizers set up for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object; find the object's beginning.
		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
		hbits := heapBitsForAddr(p)
		if !hbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - uintptr(s.start<<_PageShift) + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					hbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was set up
				// (as opposed to the object's beginning).
				p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has finalizers (so it is kept alive).
					// Keep special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// object is still live: keep special record
			specialp = &special.next
			special = *specialp
		}
	}

	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
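	// heapBitsSweepSpan clears the mark bits as it goes and invokes the
	// callback only for objects whose mark bit was not set, i.e. objects
	// that are unreachable after this mark phase.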

	size, n, _ := s.layout()
	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
		// At this point we know that we are looking at a garbage object
		// that needs to be collected.
		if debug.allocfreetrace != 0 {
			tracefree(unsafe.Pointer(p), size)
		}
		if msanenabled {
			msanfree(unsafe.Pointer(p), size)
		}

		// Reset to allocated+noscan.
		if cl == 0 {
			// Free large span.
			if preserve {
				throw("can't preserve large span")
			}
			heapBitsForSpan(p).initSpan(s.layout())
			s.needzero = 1

			// Free the span after heapBitsSweepSpan
			// returns, since it's not done with the span.
			freeToHeap = true
		} else {
			// Free small object.
			if size > 2*ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
			} else if size > ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
			}
			if head.ptr() == nil {
				head = gclinkptr(p)
			} else {
				end.ptr().next = gclinkptr(p)
			}
			end = gclinkptr(p)
			end.ptr().next = gclinkptr(0x0bade5)
			nfree++
		}
	})

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if freeToHeap || nfree == 0 {
		// The span must be in our exclusive ownership until we update sweepgen;
		// check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		atomic.Store(&s.sweepgen, sweepgen)
	}
	if nfree > 0 {
		c.local_nsmallfree[cl] += uintptr(nfree)
		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
		// MCentral_FreeSpan updates sweepgen
	} else if freeToHeap {
		// Free large span to heap

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
		} else {
			mHeap_Free(&mheap_, s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if trace.enabled {
		traceGCSweepDone()
	}
	return res
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation. The caller should call reimburseSweepCredit if that
// turns out not to be the case once the span is allocated.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
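//
// For example (illustrative numbers only): if the pacer has set
// sweepPagesPerByte = 0.01 and spanBytesAlloc has reached 1<<20 bytes,
// then pagesOwed = int64(0.01*(1<<20)) = 10485 pages, and the loop below
// sweeps until no more than callerSweepPages of that debt remains unswept.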
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Account for this span allocation.
	spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))

	// Fix debt if necessary.
	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
	for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
	}
}

// reimburseSweepCredit records that unusableBytes bytes of a
// just-allocated span are not available for object allocation. This
// offsets the worst-case charge performed by deductSweepCredit.
func reimburseSweepCredit(unusableBytes uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Nobody cares about the credit. Avoid the atomic.
		return
	}
	atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))
}

func dumpFreeList(s *mspan) {
	printlock()
	print("runtime: free list of span ", s, ":\n")
	sstart := uintptr(s.start << _PageShift)
	link := s.freelist
	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
		if i != 0 {
			print(" -> ")
		}
		print(hex(link))
		if link.ptr() == nil {
			break
		}
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Bad link. Stop walking before we crash.
			print(" (BAD)")
			break
		}
		link = link.ptr().next
	}
	print("\n")
	printunlock()
}