// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

const (
	debugMalloc = false

	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	bitsPerPointer  = _BitsPerPointer
	bitsMask        = _BitsMask
	pointersPerByte = _PointersPerByte
	maxGCMask       = _MaxGCMask
	bitsDead        = _BitsDead
	bitsPointer     = _BitsPointer
	bitsScalar      = _BitsScalar

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

// Page number (address>>pageShift)
type pageID uintptr

// base address for all 0-byte allocations
var zerobase uintptr

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}
	size0 := size

	if flags&flagNoScan == 0 && typ == nil {
		gothrow("malloc missing type")
	}

	// This function must be atomic wrt GC, but for performance reasons
	// we don't acquirem/releasem on the fast path. The code below does not have
	// split stack checks, so it can't be preempted by GC.
	// Functions like roundup/add are inlined, and onM/racemalloc are nosplit.
	// If debugMalloc = true, these assumptions are checked below.
	if debugMalloc {
		mp := acquirem()
		if mp.mallocing != 0 {
			gothrow("malloc deadlock")
		}
		mp.mallocing = 1
		if mp.curg != nil {
			mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
		}
	}

	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which gives 2x worst-case memory
			// wastage (when all but one subobject are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			tinysize := uintptr(c.tinysize)
			if size <= tinysize {
				tiny := unsafe.Pointer(c.tiny)
				// Align tiny pointer for required (conservative) alignment.
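				// The alignment is derived from the low bits of the size:
				// a multiple of 8 gets 8-byte alignment, a multiple of 4
				// gets 4-byte alignment, and a multiple of 2 gets 2-byte
				// alignment. Since a type's size is always a multiple of
				// its alignment, this may over-align an object but never
				// under-aligns it.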
				if size&7 == 0 {
					tiny = roundup(tiny, 8)
				} else if size&3 == 0 {
					tiny = roundup(tiny, 4)
				} else if size&1 == 0 {
					tiny = roundup(tiny, 2)
				}
				size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny)))
				if size1 <= tinysize {
					// The object fits into the existing tiny block.
					x = tiny
					c.tiny = (*byte)(add(x, size))
					c.tinysize -= uintptr(size1)
					c.local_tinyallocs++
					if debugMalloc {
						mp := acquirem()
						if mp.mallocing == 0 {
							gothrow("bad malloc")
						}
						mp.mallocing = 0
						if mp.curg != nil {
							mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
						}
						// Note: one releasem for the acquirem just above.
						// The other is for the acquirem at the start of malloc.
						releasem(mp)
						releasem(mp)
					}
					return x
				}
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v == nil {
				onM(func() {
					mCache_Refill(c, tinySizeClass)
				})
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.next
			s.ref++
			//TODO: prefetch v.next
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on the amount of remaining free space.
			if maxTinySize-size > tinysize {
				c.tiny = (*byte)(add(x, size))
				c.tinysize = uintptr(maxTinySize - size)
			}
			size = maxTinySize
		} else {
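			// Small (but not tiny) allocation: look up the size class.
			// size_to_class8 covers sizes up to 1024-8 in 8-byte steps,
			// size_to_class128 covers the remaining small sizes in
			// 128-byte steps; the request is then rounded up to the
			// class's full size.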
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v == nil {
				onM(func() {
					mCache_Refill(c, int32(sizeclass))
				})
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.next
			s.ref++
			//TODO: prefetch
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.next = nil
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += intptr(size)
	} else {
		var s *mspan
		onM(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan.
		goto marked
	}

	// If allocating a defer+arg block, now that we've picked a malloc size
	// large enough to hold everything, cut the "asked for" size down to
	// just the defer header, so that the GC bitmap will record the arg block
	// as containing nothing at all (as if it were unused space at the end of
	// a malloc block caused by size rounding).
	// The defer arg areas are scanned as part of scanstack.
	if typ == deferType {
		size0 = unsafe.Sizeof(_defer{})
	}

	// From here until the marked label we mark the object as allocated
	// and store type info in the GC bitmap.
	{
		arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
		off := (uintptr(x) - arena_start) / ptrSize
		xbits := (*uint8)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
		shift := (off % wordsPerBitmapByte) * gcBits
		if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary {
			println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask))
			gothrow("bad bits in markallocated")
		}

		var ti, te uintptr
		var ptrmask *uint8
		if size == ptrSize {
			// It's one word and it has pointers, so it must be a pointer.
			*xbits |= (bitsPointer << 2) << shift
			goto marked
		}
		if typ.kind&kindGCProg != 0 {
			nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize
			masksize := nptr
			if masksize%2 != 0 {
				masksize *= 2 // repeated
			}
			masksize = masksize * pointersPerByte / 8 // 4 bits per word
			masksize++                                // unroll flag in the beginning
			if masksize > maxGCMask && typ.gc[1] != 0 {
				// If the mask is too large, unroll the program directly
				// into the GC bitmap. It's 7 times slower than copying
				// from the pre-unrolled mask, but it saves 1/16 of the
				// type's size in memory for the mask.
				mp := acquirem()
				mp.ptrarg[0] = x
				mp.ptrarg[1] = unsafe.Pointer(typ)
				mp.scalararg[0] = uintptr(size)
				mp.scalararg[1] = uintptr(size0)
				onM(unrollgcproginplace_m)
				releasem(mp)
				goto marked
			}
			ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
			// Check whether the program is already unrolled.
			if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
				mp := acquirem()
				mp.ptrarg[0] = unsafe.Pointer(typ)
				onM(unrollgcprog_m)
				releasem(mp)
			}
			ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
		} else {
			ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask
		}
		if size == 2*ptrSize {
			*xbits = *ptrmask | bitBoundary
			goto marked
		}
		te = uintptr(typ.size) / ptrSize
		// If the type occupies an odd number of words, its mask is repeated.
		if te%2 == 0 {
			te /= 2
		}
		// Copy pointer bitmask into the bitmap.
		for i := uintptr(0); i < size0; i += 2 * ptrSize {
			v := *(*uint8)(add(unsafe.Pointer(ptrmask), ti))
			ti++
			if ti == te {
				ti = 0
			}
			if i == 0 {
				v |= bitBoundary
			}
			if i+ptrSize == size0 {
				v &^= uint8(bitPtrMask << 4)
			}

			*xbits = v
			xbits = (*byte)(add(unsafe.Pointer(xbits), ^uintptr(0)))
		}
		if size0%(2*ptrSize) == 0 && size0 < size {
			// Mark the word after the object's last word as bitsDead.
			*xbits = bitsDead << 2
		}
	}
marked:
	if raceenabled {
		racemalloc(x, size)
	}

	if debugMalloc {
		mp := acquirem()
		if mp.mallocing == 0 {
			gothrow("bad malloc")
		}
		mp.mallocing = 0
		if mp.curg != nil {
			mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
		}
		// Note: one releasem for the acquirem just above.
		// The other is for the acquirem at the start of malloc.
		releasem(mp)
		releasem(mp)
	}

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

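	// Heap profiling: next_sample is a byte countdown. Allocations of at
	// least MemProfileRate bytes are always sampled; smaller ones decrement
	// the countdown and are sampled only when they exhaust it (see
	// profilealloc below).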
	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if memstats.heap_alloc >= memstats.next_gc {
		gogc(0)
	}

	return x
}

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(uintptr(typ.size), typ, flags)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
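	// Reject negative lengths and lengths whose total byte size would
	// overflow or exceed the maximum allocation size (_MaxMem).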
	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
		panic("runtime: allocation size out of range")
	}
	return mallocgc(uintptr(typ.size)*n, typ, flags)
}

// rawmem returns a chunk of pointerless memory. It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}

// round size up to next size class
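// For small sizes this uses the same size-class tables as mallocgc;
// sizes of at least maxSmallSize round up to a whole number of pages,
// except that a size whose rounding would overflow is returned unchanged.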
func goroundupsize(size uintptr) uintptr {
	if size < maxSmallSize {
		if size <= 1024-8 {
			return uintptr(class_to_size[size_to_class8[(size+7)>>3]])
		}
		return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]])
	}
	if size+pageSize < size {
		return size
	}
	return (size + pageSize - 1) &^ pageMask
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := mp.mcache
	rate := MemProfileRate
	if size < uintptr(rate) {
		// pick the next profile sample point (a byte countdown)
		// If you change this, also change allocmcache.
		if rate > 0x3fffffff { // make 2*rate not overflow
			rate = 0x3fffffff
		}
		next := int32(fastrand1()) % (2 * int32(rate))
		// Subtract the "remainder" of the current allocation.
		// Otherwise objects that are close in size to the sampling rate
		// will be under-sampled, because we consistently discard this remainder.
		next -= (int32(size) - c.next_sample)
		if next < 0 {
			next = 0
		}
		c.next_sample = next
	}

	mProf_Malloc(x, size)
}

// force = 1 - do GC regardless of current heap usage
// force = 2 - do GC and eager sweep
func gogc(force int32) {
	// The gc is turned off (via enablegc) until the bootstrap has completed.
	// Also, malloc gets called in the guts of a number of libraries that might be
	// holding locks. To avoid deadlocks during stoptheworld, don't bother
	// trying to run gc while holding a lock. The next mallocgc without a lock
	// will do the gc instead.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	semacquire(&worldsema, false)

	if force == 0 && memstats.heap_alloc < memstats.next_gc {
		// typically threads which lost the race to grab
		// worldsema exit here when gc is done.
		semrelease(&worldsema)
		return
	}

	// Ok, we're doing it!  Stop everybody else
	startTime := nanotime()
	mp = acquirem()
	mp.gcing = 1
	releasem(mp)
	onM(stoptheworld)
	if mp != acquirem() {
		gothrow("gogc: rescheduled")
	}

	clearpools()

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	n := 1
	if debug.gctrace > 1 {
		n = 2
	}
	for i := 0; i < n; i++ {
		if i > 0 {
			startTime = nanotime()
		}
		// switch to g0, call gc, then switch back
		mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
		mp.scalararg[1] = uintptr(startTime >> 32)   // high 32 bits
		if force >= 2 {
			mp.scalararg[2] = 1 // eagersweep
		} else {
			mp.scalararg[2] = 0
		}
		onM(gc_m)
	}

	// all done
	mp.gcing = 0
	semrelease(&worldsema)
	onM(starttheworld)
	releasem(mp)
	mp = nil

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}

// GC runs a garbage collection.
func GC() {
	gogc(2)
}

// linker-provided
var noptrdata struct{}
var enoptrbss struct{}

// SetFinalizer sets the finalizer associated with x to f.
// When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// f(x) in a separate goroutine. This makes x reachable again, but
// now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that x is unreachable, it will free x.
//
// SetFinalizer(x, nil) clears any finalizer associated with x.
//
// The argument x must be a pointer to an object allocated by
// calling new or by taking the address of a composite literal.
// The argument f must be a function that takes a single argument
// to which x's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer aborts the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
// The finalizer for x is scheduled to run at some arbitrary time after
// x becomes unreachable.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an os.File object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
// It is not guaranteed that a finalizer will run if the size of *x is
// zero bytes.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
func SetFinalizer(obj interface{}, finalizer interface{}) {
	e := (*eface)(unsafe.Pointer(&obj))
	etyp := e._type
	if etyp == nil {
		gothrow("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.kind&kindMask != kindPtr {
		gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.elem == nil {
		gothrow("nil elem type!")
	}

	// find the containing object
	_, base, _ := findObject(e.data)

	if base == nil {
		// 0-length objects are okay.
		if e.data == unsafe.Pointer(&zerobase) {
			return
		}

		// Global initializers might be linker-allocated.
		//	var Foo = &Object{}
		//	func main() {
		//		runtime.SetFinalizer(Foo, nil)
		//	}
		// The segments are, in order: text, rodata, noptrdata, data, bss, noptrbss.
		if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
			return
		}
		gothrow("runtime.SetFinalizer: pointer not in allocated block")
	}

	if e.data != base {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from a tiny alloc (see mallocgc for details).
		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
			gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}

	f := (*eface)(unsafe.Pointer(&finalizer))
	ftyp := f._type
	if ftyp == nil {
		// switch to M stack and remove finalizer
		onM(func() {
			removefinalizer(e.data)
		})
		return
	}

	if ftyp.kind&kindMask != kindFunc {
		gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	ins := *(*[]*_type)(unsafe.Pointer(&ft.in))
	if ft.dotdotdot || len(ins) != 1 {
		gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
	}
	fint := ins[0]
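	// Check that x is assignable to the finalizer's parameter type:
	// the same type, pointer types with the same element type where at
	// least one side is unnamed, or an interface type that x satisfies.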
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.kind&kindMask == kindPtr:
		if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.kind&kindMask == kindInterface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.mhdr) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if _, ok := assertE2I2(ityp, obj); ok {
			goto okarg
		}
	}
	gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) {
		nret = round(nret, uintptr(t.align)) + uintptr(t.size)
	}
	nret = round(nret, ptrSize)

	// make sure we have a finalizer goroutine
	createfing()

	onM(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			gothrow("runtime.SetFinalizer: finalizer already set")
		}
	})
}

// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// Look up pointer v in heap. Return the span containing the object,
// the start of the object, and the size of the object. If the object
// does not exist, return nil, nil, 0.
func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
	c := gomcache()
	c.local_nlookup++
	if ptrSize == 4 && c.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(c)
		unlock(&mheap_.lock)
	}

	// find span
	arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
	arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
		return
	}
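	// mheap_.spans holds one *mspan per arena page; index it by the page
	// number of v relative to the start of the arena.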
	p := uintptr(v) >> pageShift
	q := p - arena_start>>pageShift
	s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
	if s == nil {
		return
	}
	x = unsafe.Pointer(uintptr(s.start) << pageShift)

	if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
		s = nil
		x = nil
		return
	}

	n = uintptr(s.elemsize)
	if s.sizeclass != 0 {
		x = add(x, (uintptr(v)-uintptr(x))/n*n)
	}
	return
}

var fingCreate uint32

func createfing() {
	// start the finalizer goroutine exactly once
	if fingCreate == 0 && cas(&fingCreate, 0, 1) {
		go runfinq()
	}
}

// This is the goroutine that runs all of the finalizers
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
	)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gp := getg()
			fing = gp
			fingwait = true
			gp.issystem = true
			goparkunlock(&finlock, "finalizer wait")
			gp.issystem = false
			continue
		}
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := int32(0); i < fb.cnt; i++ {
				f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{})))

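				// The frame passed to reflectcall holds the finalizer's
				// single argument (at most the size of an empty interface)
				// followed by space for its ignored return values.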
				framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret)
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC;
					// all not yet finalized objects are stored in finq.
					// If we do not mark it as FlagNoScan,
					// the last finalized object is not collected.
					frame = mallocgc(framesz, nil, flagNoScan)
					framecap = framesz
				}

				if f.fint == nil {
					gothrow("missing type in runfinq")
				}
				switch f.fint.kind & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(frame) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(frame)._type = &f.ot.typ
					(*eface)(frame).data = f.arg
					if len(ityp.mhdr) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						*(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
					}
				default:
					gothrow("bad kind in runfinq")
				}
				reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))

				// drop finalizer queue references to finalized object
				f.fn = nil
				f.arg = nil
				f.ot = nil
			}
			fb.cnt = 0
			next := fb.next
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}

var persistent struct {
	lock mutex
	pos  unsafe.Pointer
	end  unsafe.Pointer
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if align != 0 {
		if align&(align-1) != 0 {
			gothrow("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			gothrow("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, stat)
	}

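	// Small requests are bump-allocated out of a shared chunk under
	// persistent.lock; when the current chunk is exhausted, a fresh
	// 256 kB chunk is obtained from sysAlloc.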
	lock(&persistent.lock)
	persistent.pos = roundup(persistent.pos, align)
	if uintptr(persistent.pos)+size > uintptr(persistent.end) {
		persistent.pos = sysAlloc(chunk, &memstats.other_sys)
		if persistent.pos == nil {
			unlock(&persistent.lock)
			gothrow("runtime: cannot allocate memory")
		}
		persistent.end = add(persistent.pos, chunk)
	}
	p := persistent.pos
	persistent.pos = add(persistent.pos, size)
	unlock(&persistent.lock)

	if stat != &memstats.other_sys {
		xadd64(stat, int64(size))
		xadd64(&memstats.other_sys, -int64(size))
	}
	return p
}