blob: 1bb709c8957503ffd454e2cb98c2e49c4f2f0386 [file] [log] [blame]
Russ Cox484f8012015-02-19 13:38:46 -05001// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Garbage collector: marking and scanning
6
7package runtime
8
9import "unsafe"
10
// gcscan_m scans all of the stacks (and the other GC roots via
// markroot), greying (or graying if in America) the referents
// but not blackening them since the mark write barrier isn't installed.
//
// The root scan is run single-threaded (work.nproc = 1) through the
// work.markfor parfor, and the function throws if any g was missed.
//go:nowritebarrier
func gcscan_m() {
	_g_ := getg()

	// Grab the g that called us and potentially allow rescheduling.
	// This allows it to be scanned like other goroutines.
	mastergp := _g_.m.curg
	casgstatus(mastergp, _Grunning, _Gwaiting)
	mastergp.waitreason = "garbage collection scan"

	// Span sweeping has been done by finishsweep_m.
	// Long term we will want to make this goroutine runnable
	// by placing it onto a scanenqueue state and then calling
	// runtime·restartg(mastergp) to make it Grunnable.
	// At the bottom we will want to return this p back to the scheduler.

	// Prepare flag indicating that the scan has not been completed.
	local_allglen := gcResetGState()

	work.nwait = 0
	work.ndone = 0
	work.nproc = 1 // For now do not do this in parallel.
	// ackgcphase is not needed since we are not scanning running goroutines.
	parforsetup(work.markfor, work.nproc, uint32(_RootCount+local_allglen), false, markroot)
	parfordo(work.markfor)

	lock(&allglock)
	// Check that gc work is done.
	for i := 0; i < local_allglen; i++ {
		gp := allgs[i]
		if !gp.gcworkdone {
			throw("scan missed a g")
		}
	}
	unlock(&allglock)

	// Let the g that called us continue to run.
	casgstatus(mastergp, _Gwaiting, _Grunning)
}
52
// oneptr is the ptrmask for an allocation containing a single pointer
// (used by markroot to scan the finalizer fn slot of a special).
var oneptr = [...]uint8{typePointer}
55
// markroot scans the i'th root.
//
// Roots 0.._RootCount-1 are the fixed roots (data segments, bss,
// finalizers, span specials, mcache flushing); any larger index
// selects a goroutine stack, allgs[i-_RootCount]. markroot is invoked
// by the work.markfor parfor set up in gcscan_m.
//go:nowritebarrier
func markroot(desc *parfor, i uint32) {
	// Use a private work cache for this root; its statistics are
	// zeroed and the buffers disposed at the bottom.
	// TODO: Consider using getg().m.p.ptr().gcw.
	var gcw gcWork

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch i {
	case _RootData:
		// Scan the data segment of every loaded module using its
		// compact pointer mask.
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			scanblock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, &gcw)
		}

	case _RootBss:
		// Scan the bss segment of every loaded module.
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			scanblock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, &gcw)
		}

	case _RootFinalizers:
		// Scan all queued finalizer records.
		for fb := allfin; fb != nil; fb = fb.alllink {
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], &gcw)
		}

	case _RootSpans:
		// mark MSpan.specials
		sg := mheap_.sweepgen
		for spanidx := uint32(0); spanidx < uint32(len(work.spans)); spanidx++ {
			s := work.spans[spanidx]
			if s.state != mSpanInUse {
				continue
			}
			if !useCheckmark && s.sweepgen != sg {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
				if gcphase != _GCscan {
					scanblock(p, s.elemsize, nil, &gcw) // scanned during mark phase
				}
				// The fn slot is a single pointer; scan it with the
				// one-pointer mask.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptr[0], &gcw)
			}
		}

	case _RootFlushCaches:
		if gcphase != _GCscan { // Do not flush mcaches during GCscan phase.
			flushallmcaches()
		}

	default:
		// the rest is scanning goroutine stacks
		if uintptr(i-_RootCount) >= allglen {
			throw("markroot: bad index")
		}
		gp := allgs[i-_RootCount]

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// Shrink a stack if not much of it is being used but not in the scan phase.
		if gcphase == _GCmarktermination {
			// Shrink during STW GCmarktermination phase thus avoiding
			// complications introduced by shrinking during
			// non-STW phases.
			shrinkstack(gp)
		}
		if readgstatus(gp) == _Gdead {
			gp.gcworkdone = true
		} else {
			gp.gcworkdone = false
		}
		restart := stopg(gp)

		// goroutine will scan its own stack when it stops running.
		// Wait until it has. (Busy-wait: the g is expected to reach
		// a safepoint and set gcworkdone shortly.)
		for readgstatus(gp) == _Grunning && !gp.gcworkdone {
		}

		// scanstack(gp) is done as part of gcphasework
		// But to make sure we finished we need to make sure that
		// the stack traps have all responded so drop into
		// this while loop until they respond.
		for !gp.gcworkdone {
			status = readgstatus(gp)
			if status == _Gdead {
				gp.gcworkdone = true // scan is a noop
				break
			}
			if status == _Gwaiting || status == _Grunnable {
				restart = stopg(gp)
			}
		}
		if restart {
			restartg(gp)
		}
	}

	// Root aren't part of the heap, so don't count them toward
	// marked heap bytes.
	gcw.bytesMarked = 0
	gcw.scanWork = 0
	gcw.dispose()
}
170
Austin Clements4b2fde92015-03-16 14:22:00 -0400171// gcAssistAlloc records and allocation of size bytes and, if
172// allowAssist is true, may assist GC scanning in proportion to the
173// allocations performed by this mutator since the last assist.
174//
Austin Clementsbb632052015-03-27 17:01:53 -0400175// It should only be called if gcAssistAlloc != 0.
Austin Clements1b4025f2015-04-19 15:22:20 -0400176//
177// This must be called with preemption disabled.
Russ Cox484f8012015-02-19 13:38:46 -0500178//go:nowritebarrier
Austin Clements4b2fde92015-03-16 14:22:00 -0400179func gcAssistAlloc(size uintptr, allowAssist bool) {
180 // Find the G responsible for this assist.
181 gp := getg()
182 if gp.m.curg != nil {
183 gp = gp.m.curg
184 }
185
186 // Record allocation.
187 gp.gcalloc += size
188
189 if !allowAssist {
190 return
191 }
192
193 // Compute the amount of assist scan work we need to do.
194 scanWork := int64(gcController.assistRatio*float64(gp.gcalloc)) - gp.gcscanwork
195 // scanWork can be negative if the last assist scanned a large
196 // object and we're still ahead of our assist goal.
197 if scanWork <= 0 {
198 return
199 }
200
201 // Steal as much credit as we can from the background GC's
202 // scan credit. This is racy and may drop the background
203 // credit below 0 if two mutators steal at the same time. This
204 // will just cause steals to fail until credit is accumulated
205 // again, so in the long run it doesn't really matter, but we
206 // do have to handle the negative credit case.
207 bgScanCredit := atomicloadint64(&gcController.bgScanCredit)
208 stolen := int64(0)
209 if bgScanCredit > 0 {
210 if bgScanCredit < scanWork {
211 stolen = bgScanCredit
212 } else {
213 stolen = scanWork
214 }
215 xaddint64(&gcController.bgScanCredit, -scanWork)
216
217 scanWork -= stolen
218 gp.gcscanwork += stolen
219
220 if scanWork == 0 {
221 return
222 }
223 }
224
225 // Perform assist work
226 systemstack(func() {
Austin Clements100da602015-03-17 12:17:47 -0400227 // Track time spent in this assist. Since we're on the
228 // system stack, this is non-preemptible, so we can
229 // just measure start and end time.
230 startTime := nanotime()
231
Austin Clements8d03acc2015-03-23 21:07:33 -0400232 xadd(&work.nwait, -1)
233
Austin Clements1b4025f2015-04-19 15:22:20 -0400234 // drain own cached work first in the hopes that it
Austin Clements4b2fde92015-03-16 14:22:00 -0400235 // will be more cache friendly.
Austin Clements1b4025f2015-04-19 15:22:20 -0400236 gcw := &getg().m.p.ptr().gcw
Austin Clements4b2fde92015-03-16 14:22:00 -0400237 startScanWork := gcw.scanWork
Austin Clements1b4025f2015-04-19 15:22:20 -0400238 gcDrainN(gcw, scanWork)
Austin Clements4b2fde92015-03-16 14:22:00 -0400239 // Record that we did this much scan work.
240 gp.gcscanwork += gcw.scanWork - startScanWork
Austin Clements1b4025f2015-04-19 15:22:20 -0400241 // No need to dispose since we're not in mark termination.
Austin Clements100da602015-03-17 12:17:47 -0400242
Austin Clements8d03acc2015-03-23 21:07:33 -0400243 // If this is the last worker and we ran out of work,
244 // signal a completion point.
245 if xadd(&work.nwait, +1) == work.nproc && work.full == 0 && work.partial == 0 {
246 // This has reached a background completion
Austin Clementsce502b02015-04-22 17:44:36 -0400247 // point.
248 gcBgMarkDone()
Austin Clements8d03acc2015-03-23 21:07:33 -0400249 }
250
Austin Clements100da602015-03-17 12:17:47 -0400251 duration := nanotime() - startTime
252 _p_ := gp.m.p.ptr()
253 _p_.gcAssistTime += duration
254 if _p_.gcAssistTime > gcAssistTimeSlack {
255 xaddint64(&gcController.assistTime, _p_.gcAssistTime)
256 _p_.gcAssistTime = 0
257 }
Austin Clements4b2fde92015-03-16 14:22:00 -0400258 })
Russ Cox484f8012015-02-19 13:38:46 -0500259}
260
261// The gp has been moved to a GC safepoint. GC phase specific
262// work is done here.
263//go:nowritebarrier
264func gcphasework(gp *g) {
265 switch gcphase {
266 default:
267 throw("gcphasework in bad gcphase")
268 case _GCoff, _GCquiesce, _GCstw, _GCsweep:
269 // No work.
270 case _GCscan:
271 // scan the stack, mark the objects, put pointers in work buffers
272 // hanging off the P where this is being run.
273 // Indicate that the scan is valid until the goroutine runs again
274 scanstack(gp)
275 case _GCmark:
276 // No work.
277 case _GCmarktermination:
278 scanstack(gp)
279 // All available mark work will be emptied before returning.
280 }
281 gp.gcworkdone = true
282}
283
// scanstack scans gp's stack frames and deferred frames, greying the
// pointers found, and marks the scan valid until gp runs again.
//
// gp must be stopped with its _Gscan status bit set and must not be
// the calling g.
//go:nowritebarrier
func scanstack(gp *g) {
	// A previous scan is still valid; nothing to do.
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}
	mp := gp.m
	if mp != nil && mp.helpgc != 0 {
		throw("can't scan gchelper stack")
	}

	// Use this P's cached work buffers, remembering the counters so
	// stack bytes and stack scan work can be excluded below.
	gcw := &getg().m.p.ptr().gcw
	origBytesMarked := gcw.bytesMarked
	origScanWork := gcw.scanWork
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		// Pick up gcw as free variable so gentraceback and friends can
		// keep the same signature.
		scanframeworker(frame, unused, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
	tracebackdefers(gp, scanframe, nil)
	// Stacks aren't part of the heap, so don't count them toward
	// marked heap bytes (and don't credit the scan work either).
	gcw.bytesMarked = origBytesMarked
	gcw.scanWork = origScanWork
	if gcphase == _GCmarktermination {
		// Ps aren't allowed to cache work during mark termination.
		gcw.dispose()
	}
	gp.gcscanvalid = true
}
336
// Scan a stack frame: local variables and function arguments/results.
// Pointers are located via the frame's locals and args stack maps,
// selected by the PCDATA stack map index at the frame's continuation
// PC, and fed into gcw via scanblock.
//go:nowritebarrier
func scanframeworker(frame *stkframe, unused unsafe.Pointer, gcw *gcWork) {

	f := frame.fn
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}
	if _DebugGC > 1 {
		print("scanframe ", funcname(f), "\n")
	}
	// Back up one byte unless we're at the function entry, so the
	// PCDATA lookup reflects the call site rather than the return
	// address.
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Scan local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	// minsize is the architecture-dependent frame overhead below which
	// there are no locals to scan. (thechar is the toolchain arch
	// character; '7' is arm64, which uses spAlign — assume '6'/'8' are
	// amd64/386, TODO confirm.)
	var minsize uintptr
	switch thechar {
	case '6', '8':
		minsize = 0
	case '7':
		minsize = spAlign
	default:
		minsize = ptrSize
	}
	if size > minsize {
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stkmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("scanframe: bad symbol table")
		}
		bv := stackmapdata(stkmap, pcdata)
		size = (uintptr(bv.n) / typeBitsWidth) * ptrSize
		scanblock(frame.varp-size, size, bv.bytedata, gcw)
	}

	// Scan arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			// Precomputed argument map (frame.argmap) takes priority.
			bv = *frame.argmap
		} else {
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("scanframe: bad symbol table")
			}
			bv = stackmapdata(stkmap, pcdata)
		}
		scanblock(frame.argp, uintptr(bv.n)/typeBitsWidth*ptrSize, bv.bytedata, gcw)
	}
}
411
// TODO(austin): Can we consolidate the gcDrain* functions?

// gcDrain scans objects in work buffers, blackening grey
// objects until all work buffers have been drained.
// If flushScanCredit != -1, gcDrain flushes accumulated scan work
// credit to gcController.bgScanCredit whenever gcw's local scan work
// credit exceeds flushScanCredit.
//go:nowritebarrier
func gcDrain(gcw *gcWork, flushScanCredit int64) {
	if gcphase != _GCmark && gcphase != _GCmarktermination {
		throw("scanblock phase incorrect")
	}

	// Set up the credit-flush thresholds. flushScanCredit == -1
	// disables flushing by parking nextScanFlush at MaxInt64.
	var lastScanFlush, nextScanFlush int64
	if flushScanCredit != -1 {
		lastScanFlush = gcw.scanWork
		nextScanFlush = lastScanFlush + flushScanCredit
	} else {
		nextScanFlush = int64(^uint64(0) >> 1)
	}

	for {
		// If another proc wants a pointer, give it some.
		if work.nwait > 0 && work.full == 0 {
			gcw.balance()
		}

		b := gcw.get()
		if b == 0 {
			// work barrier reached
			break
		}
		// If the current wbuf is filled by the scan a new wbuf might be
		// returned that could possibly hold only a single object. This
		// could result in each iteration draining only a single object
		// out of the wbuf passed in + a single object placed
		// into an empty wbuf in scanobject so there could be
		// a performance hit as we keep fetching fresh wbufs.
		scanobject(b, 0, nil, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= nextScanFlush {
			credit := gcw.scanWork - lastScanFlush
			xaddint64(&gcController.bgScanCredit, credit)
			lastScanFlush = gcw.scanWork
			nextScanFlush = lastScanFlush + flushScanCredit
		}
	}
	// Flush whatever credit accumulated since the last flush.
	if flushScanCredit != -1 {
		credit := gcw.scanWork - lastScanFlush
		xaddint64(&gcController.bgScanCredit, credit)
	}
}
467
// gcDrainUntilPreempt blackens grey objects until g.preempt is set.
// This is best-effort, so it will return as soon as it is unable to
// get work, even though there may be more work in the system.
//
// As in gcDrain, if flushScanCredit != -1, accumulated scan work is
// periodically flushed to gcController.bgScanCredit so mutator
// assists can draw on it.
//go:nowritebarrier
func gcDrainUntilPreempt(gcw *gcWork, flushScanCredit int64) {
	if gcphase != _GCmark {
		println("gcphase =", gcphase)
		throw("gcDrainUntilPreempt phase incorrect")
	}

	// Set up the credit-flush thresholds; -1 disables flushing by
	// parking nextScanFlush at MaxInt64.
	var lastScanFlush, nextScanFlush int64
	if flushScanCredit != -1 {
		lastScanFlush = gcw.scanWork
		nextScanFlush = lastScanFlush + flushScanCredit
	} else {
		nextScanFlush = int64(^uint64(0) >> 1)
	}

	gp := getg()
	for !gp.preempt {
		// If the work queue is empty, balance. During
		// concurrent mark we don't really know if anyone else
		// can make use of this work, but even if we're the
		// only worker, the total cost of this per cycle is
		// only O(_WorkbufSize) pointer copies.
		if work.full == 0 && work.partial == 0 {
			gcw.balance()
		}

		b := gcw.tryGet()
		if b == 0 {
			// No more work
			break
		}
		scanobject(b, 0, nil, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= nextScanFlush {
			credit := gcw.scanWork - lastScanFlush
			xaddint64(&gcController.bgScanCredit, credit)
			lastScanFlush = gcw.scanWork
			nextScanFlush = lastScanFlush + flushScanCredit
		}
	}
	// Flush whatever credit accumulated since the last flush.
	if flushScanCredit != -1 {
		credit := gcw.scanWork - lastScanFlush
		xaddint64(&gcController.bgScanCredit, credit)
	}
}
519
Austin Clements028f9722015-03-13 14:01:16 -0400520// gcDrainN blackens grey objects until it has performed roughly
521// scanWork units of scan work. This is best-effort, so it may perform
522// less work if it fails to get a work buffer. Otherwise, it will
523// perform at least n units of work, but may perform more because
524// scanning is always done in whole object increments.
Russ Cox484f8012015-02-19 13:38:46 -0500525//go:nowritebarrier
Austin Clements028f9722015-03-13 14:01:16 -0400526func gcDrainN(gcw *gcWork, scanWork int64) {
Austin Clements028f9722015-03-13 14:01:16 -0400527 targetScanWork := gcw.scanWork + scanWork
528 for gcw.scanWork < targetScanWork {
Russ Cox484f8012015-02-19 13:38:46 -0500529 // This might be a good place to add prefetch code...
530 // if(wbuf.nobj > 4) {
531 // PREFETCH(wbuf->obj[wbuf.nobj - 3];
532 // }
533 b := gcw.tryGet()
534 if b == 0 {
535 return
536 }
Austin Clementscadd4f82015-03-12 13:09:30 -0400537 scanobject(b, 0, nil, gcw)
Russ Cox484f8012015-02-19 13:38:46 -0500538 }
539}
540
541// scanblock scans b as scanobject would.
542// If the gcphase is GCscan, scanblock performs additional checks.
543//go:nowritebarrier
Austin Clementscadd4f82015-03-12 13:09:30 -0400544func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
Russ Cox484f8012015-02-19 13:38:46 -0500545 // Use local copies of original parameters, so that a stack trace
546 // due to one of the throws below shows the original block
547 // base and extent.
548 b := b0
549 n := n0
550
551 // ptrmask can have 2 possible values:
552 // 1. nil - obtain pointer mask from GC bitmap.
553 // 2. pointer to a compact mask (for stacks and data).
554
555 scanobject(b, n, ptrmask, gcw)
556 if gcphase == _GCscan {
557 if inheap(b) && ptrmask == nil {
558 // b is in heap, we are in GCscan so there should be a ptrmask.
559 throw("scanblock: In GCscan phase and inheap is true.")
560 }
561 }
562}
563
// scanobject scans memory starting at b, adding pointers to gcw.
// If ptrmask != nil, it specifies the pointer mask starting at b and
// n specifies the number of bytes to scan.
// If ptrmask == nil, b must point to the beginning of a heap object
// and scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object (it ignores n).
//go:nowritebarrier
func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWork) {
	// Cache the heap bounds for the pointer range test below.
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used
	scanWork := int64(0)

	// Find bits of the beginning of the object.
	var hbits heapBits

	if ptrmask == nil {
		// b must point to the beginning of a heap object, so
		// we can get its bits and span directly.
		hbits = heapBitsForAddr(b)
		s := spanOfUnchecked(b)
		n = s.elemsize
		if n == 0 {
			throw("scanobject n == 0")
		}
	}
	// Walk the block one pointer-sized word at a time.
	for i := uintptr(0); i < n; i += ptrSize {
		// Find bits for this word.
		var bits uintptr
		if ptrmask != nil {
			// dense mask (stack or data): 4 entries of
			// typeBitsWidth bits per mask byte.
			bits = (uintptr(*(*byte)(add(unsafe.Pointer(ptrmask), (i/ptrSize)/4))) >> (((i / ptrSize) % 4) * typeBitsWidth)) & typeMask
		} else {
			if i != 0 {
				// Avoid needless hbits.next() on last iteration.
				hbits = hbits.next()
			}
			bits = uintptr(hbits.typeBits())
			if bits == typeDead {
				break // no more pointers in this object
			}
		}

		if bits <= typeScalar { // typeScalar, typeDead, typeScalarMarked
			continue
		}

		if bits&typePointer != typePointer {
			print("gc useCheckmark=", useCheckmark, " b=", hex(b), " ptrmask=", ptrmask, "\n")
			throw("unexpected garbage collection bits")
		}

		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// Track the scan work performed as a way to estimate
		// GC time. We use the number of pointers scanned
		// because pointer scanning dominates the cost of
		// scanning.
		//
		// TODO(austin): Consider counting only pointers into
		// the heap, since nil and non-heap pointers are
		// probably cheap to scan.
		scanWork++

		// At this point we have extracted the next potential pointer.
		// Check if it points into heap.
		if obj == 0 || obj < arena_start || obj >= arena_used {
			continue
		}

		if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && useCheckmark {
			checkwbshadow((*uintptr)(unsafe.Pointer(b + i)))
		}

		// Mark the object.
		if obj, hbits, span := heapBitsForObject(obj); obj != 0 {
			greyobject(obj, b, i, hbits, span, gcw)
		}
	}
	// Credit the bytes and scan work to this worker's statistics.
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += scanWork
}
645
// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled (so this g's P cannot change under us
// while we use its cached work buffers).
//go:nowritebarrier
func shade(b uintptr) {
	// heapBitsForObject resolves b to the object containing it;
	// obj == 0 means there is nothing to shade.
	if obj, hbits, span := heapBitsForObject(b); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, hbits, span, gcw)
		if gcphase == _GCmarktermination {
			// Ps aren't allowed to cache work during mark
			// termination.
			gcw.dispose()
		}
	}
}
661
662// obj is the start of an object with mark mbits.
Austin Clements33e0f3d2015-04-27 15:42:45 -0400663// If it isn't already marked, mark it and enqueue into gcw.
Russ Cox484f8012015-02-19 13:38:46 -0500664// base and off are for debugging only and could be removed.
665//go:nowritebarrier
Austin Clements50a66562015-03-12 16:53:57 -0400666func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork) {
Russ Cox484f8012015-02-19 13:38:46 -0500667 // obj should be start of allocation, and so must be at least pointer-aligned.
668 if obj&(ptrSize-1) != 0 {
669 throw("greyobject: obj not pointer-aligned")
670 }
671
Russ Cox89a091d2015-02-19 16:43:27 -0500672 if useCheckmark {
Russ Cox484f8012015-02-19 13:38:46 -0500673 if !hbits.isMarked() {
Austin Clements506615d2015-03-12 14:26:04 -0400674 printlock()
Russ Cox484f8012015-02-19 13:38:46 -0500675 print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
676 print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
677
678 // Dump the source (base) object
Austin Clements506615d2015-03-12 14:26:04 -0400679 gcDumpObject("base", base, off)
Russ Cox484f8012015-02-19 13:38:46 -0500680
681 // Dump the object
Austin Clements506615d2015-03-12 14:26:04 -0400682 gcDumpObject("obj", obj, ^uintptr(0))
Russ Cox484f8012015-02-19 13:38:46 -0500683
Russ Cox484f8012015-02-19 13:38:46 -0500684 throw("checkmark found unmarked object")
685 }
686 if !hbits.isCheckmarked() {
687 return
688 }
689 hbits.setCheckmarked()
690 if !hbits.isCheckmarked() {
691 throw("setCheckmarked and isCheckmarked disagree")
692 }
693 } else {
694 // If marked we have nothing to do.
695 if hbits.isMarked() {
696 return
697 }
698
Russ Cox484f8012015-02-19 13:38:46 -0500699 hbits.setMarked()
Russ Cox484f8012015-02-19 13:38:46 -0500700
Austin Clementsda4874c2015-02-27 12:41:20 -0500701 // If this is a noscan object, fast-track it to black
702 // instead of greying it.
703 if hbits.typeBits() == typeDead {
Austin Clements50a66562015-03-12 16:53:57 -0400704 gcw.bytesMarked += uint64(span.elemsize)
Austin Clementsda4874c2015-02-27 12:41:20 -0500705 return
706 }
Russ Cox484f8012015-02-19 13:38:46 -0500707 }
708
709 // Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
710 // seems like a nice optimization that can be added back in.
711 // There needs to be time between the PREFETCH and the use.
712 // Previously we put the obj in an 8 element buffer that is drained at a rate
713 // to give the PREFETCH time to do its work.
714 // Use of PREFETCHNTA might be more appropriate than PREFETCH
715
716 gcw.put(obj)
717}
718
// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
//
// label prefixes each line of output; pass off == ^uintptr(0) to dump
// without marking any field (no offset will match).
func gcDumpObject(label string, obj, off uintptr) {
	// Addresses outside the heap arena have no span to describe.
	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
		print(label, "=", hex(obj), " is not a heap object\n")
		return
	}
	// Look up the span for obj by page number.
	k := obj >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := h_spans[x]
	print(label, "=", hex(obj), " k=", hex(k))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.start*_PageSize=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
	// Dump each pointer-sized word of the object, flagging the word
	// at offset off.
	for i := uintptr(0); i < s.elemsize; i += ptrSize {
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
}
744
Russ Cox484f8012015-02-19 13:38:46 -0500745// When in GCmarkterminate phase we allocate black.
746//go:nowritebarrier
Austin Clements50a66562015-03-12 16:53:57 -0400747func gcmarknewobject_m(obj, size uintptr) {
Russ Cox484f8012015-02-19 13:38:46 -0500748 if gcphase != _GCmarktermination {
749 throw("marking new object while not in mark termination phase")
750 }
Russ Cox89a091d2015-02-19 16:43:27 -0500751 if useCheckmark { // The world should be stopped so this should not happen.
Russ Cox484f8012015-02-19 13:38:46 -0500752 throw("gcmarknewobject called while doing checkmark")
753 }
754
755 heapBitsForAddr(obj).setMarked()
Austin Clements50a66562015-03-12 16:53:57 -0400756 xadd64(&work.bytesMarked, int64(size))
Russ Cox484f8012015-02-19 13:38:46 -0500757}
758
759// Checkmarking
760
761// To help debug the concurrent GC we remark with the world
762// stopped ensuring that any object encountered has their normal
763// mark bit set. To do this we use an orthogonal bit
764// pattern to indicate the object is marked. The following pattern
// uses the upper two bits in the object's boundary nibble.
766// 01: scalar not marked
767// 10: pointer not marked
768// 11: pointer marked
769// 00: scalar marked
// XORing with 01 will flip the pattern from marked to unmarked and vice versa.
771// The higher bit is 1 for pointers and 0 for scalars, whether the object
772// is marked or not.
// The first nibble no longer holds the typeDead pattern indicating that
// there are no more pointers in the object. This information is held
775// in the second nibble.
776
// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits. It is toggled by initCheckmarks/clearCheckmarks.
var useCheckmark = false
Russ Cox484f8012015-02-19 13:38:46 -0500781
782//go:nowritebarrier
783func initCheckmarks() {
Russ Cox89a091d2015-02-19 16:43:27 -0500784 useCheckmark = true
Russ Cox484f8012015-02-19 13:38:46 -0500785 for _, s := range work.spans {
786 if s.state == _MSpanInUse {
787 heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
788 }
789 }
790}
791
792func clearCheckmarks() {
Russ Cox89a091d2015-02-19 16:43:27 -0500793 useCheckmark = false
Russ Cox484f8012015-02-19 13:38:46 -0500794 for _, s := range work.spans {
795 if s.state == _MSpanInUse {
796 heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
797 }
798 }
799}