// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import "unsafe"

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	spanidx uint32 // background sweeper position

	nbgsweep    uint32
	npausesweep uint32
}

//go:nowritebarrier
func finishsweep_m() {
	// The world is stopped, so we should be able to complete the sweeps
	// quickly.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is called with the world
	// stopped, this code is not required.
	sg := mheap_.sweepgen
	for _, s := range work.spans {
		if s.sweepgen != sg && s.state == _MSpanInUse {
			mSpan_EnsureSwept(s)
		}
	}
}

func bgsweep(c chan int) {
	sweep.g = getg()

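	// Park initially: c <- 1 tells the goroutine that started the
	// sweeper that it is set up and parked; the first GC cycle will
	// wake it.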
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}

// sweepone sweeps one span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there is nothing to sweep.
//go:nowritebarrier
func sweepone() uintptr {
	_g_ := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, which would leave the span in an
	// inconsistent state for the next GC.
	_g_.m.locks++
	sg := mheap_.sweepgen
	for {
		idx := xadd(&sweep.spanidx, 1) - 1
		if idx >= uint32(len(work.spans)) {
			mheap_.sweepdone = 1
			_g_.m.locks--
			return ^uintptr(0)
		}
		s := work.spans[idx]
		if s.state != mSpanInUse {
			s.sweepgen = sg
			continue
		}
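		// Sweep generation protocol (see the sweepgen comment in
		// mheap.go): sweepgen == sg-2 means the span needs sweeping,
		// sg-1 means it is being swept, and sg means it is already
		// swept. The cas below claims the span for this sweeper.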
		if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages := s.npages
		if !mSpan_Sweep(s, false) {
			npages = 0
		}
		_g_.m.locks--
		return npages
	}
}

//go:nowritebarrier
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

//go:nowritebarrier
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}

// mSpan_EnsureSwept returns only when span s has been swept.
//go:nowritebarrier
func mSpan_EnsureSwept(s *mspan) {
	// Caller must disable preemption.
	// Otherwise, when this function returns, the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomicload(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is a MSpanInUse span.
	if cas(&s.sweepgen, sg-2, sg-1) {
		mSpan_Sweep(s, false)
		return
	}
	// Unfortunate condition: another goroutine is sweeping the span,
	// and we have no efficient means to wait, so spin until it finishes.
	for atomicload(&s.sweepgen) != sg {
		osyield()
	}
}

// mSpan_Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to the heap.
// If preserve=true, don't return the span to the heap nor relink it in
// the MCentral lists; the caller takes care of it.
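//
// Sweeping proceeds in stages: mark objects already on the free list
// so they are not treated as garbage, free special records (such as
// finalizers) for dead objects, walk the mark bitmap to free the dead
// objects themselves, and finally update sweepgen and release the span
// to the mcentral or the heap.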
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepStart()
	}

	xadd64(&mheap_.pagesSwept, int64(s.npages))

	cl := s.sizeclass
	size := s.elemsize
	res := false
	nfree := 0

	var head, end gclinkptr

	c := _g_.m.mcache
	freeToHeap := false

	// Mark any free objects in this span so we don't collect them.
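	// Free-list objects are unreachable, so the mark phase left them
	// unmarked; marking them here keeps heapBitsSweepSpan below from
	// treating them as dead objects and freeing them a second time.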
	sstart := uintptr(s.start << _PageShift)
	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Free list is corrupted.
			dumpFreeList(s)
			throw("free list corrupted")
		}
		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
	}

	// Unlink & free special records for any objects we're about to free.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object; find the object's beginning.
		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
		hbits := heapBitsForAddr(p)
		if !hbits.isMarked() {
			// Find the exact byte for which the special was set up
			// (as opposed to the object's beginning).
			p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
			// About to free the object: splice out the special record.
			y := special
			special = special.next
			*specialp = special
			if !freespecial(y, unsafe.Pointer(p), size, false) {
				// Stop freeing of the object if it has a finalizer.
				hbits.setMarkedNonAtomic()
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}

	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.

	size, n, _ := s.layout()
	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
		// At this point we know that we are looking at a garbage object
		// that needs to be collected.
		if debug.allocfreetrace != 0 {
			tracefree(unsafe.Pointer(p), size)
		}

		// Reset to allocated+noscan.
		if cl == 0 {
			// Free large span.
			if preserve {
				throw("can't preserve large span")
			}
			heapBitsForSpan(p).initSpan(s.layout())
			s.needzero = 1

			// Important to set sweepgen before returning the span to the heap.
			atomicstore(&s.sweepgen, sweepgen)

			// Free the span after heapBitsSweepSpan
			// returns, since it's not done with the span.
			freeToHeap = true
		} else {
			// Free small object.
			if size > 2*ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
			} else if size > ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
			}
			if head.ptr() == nil {
				head = gclinkptr(p)
			} else {
				end.ptr().next = gclinkptr(p)
			}
			end = gclinkptr(p)
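			// 0x0bade5 is a tail sentinel (likely a play on "bad end"):
			// it is overwritten when the list is spliced onto the span's
			// free list, and any stray walk past the end faults on it
			// instead of silently reading garbage.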
			end.ptr().next = gclinkptr(0x0bade5)
			nfree++
		}
	})

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// TODO(austin): Clean this up by consolidating atomicstore in
	// large span path above with this.
	if !freeToHeap && nfree == 0 {
		// The span must be in our exclusive ownership until we update sweepgen;
		// check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		atomicstore(&s.sweepgen, sweepgen)
	}
	if nfree > 0 {
		c.local_nsmallfree[cl] += uintptr(nfree)
		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
		// MCentral_FreeSpan updates sweepgen.
	} else if freeToHeap {
		// Free large span to heap.

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
		} else {
			mHeap_Free(&mheap_, s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if trace.enabled {
		traceGCSweepDone()
	}
	return res
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
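//
// For example (illustrative numbers only): if sweepPagesPerByte is
// 0.01 and 1<<20 bytes of spans have been allocated since the sweep
// phase began, the system owes int64(0.01 * (1<<20)) = 10485 swept
// pages; the loop below sweeps spans until pagesSwept catches up to
// that figure, less callerSweepPages.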
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Account for this span allocation.
	spanBytesAlloc := xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))

	// Fix debt if necessary.
	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
	for pagesOwed-int64(atomicload64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
	}
}

func dumpFreeList(s *mspan) {
	printlock()
	print("runtime: free list of span ", s, ":\n")
	sstart := uintptr(s.start << _PageShift)
	link := s.freelist
	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
		if i != 0 {
			print(" -> ")
		}
		print(hex(link))
		if link.ptr() == nil {
			break
		}
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Bad link. Stop walking before we crash.
			print(" (BAD)")
			break
		}
		link = link.ptr().next
	}
	print("\n")
	printunlock()
}