// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

const (
	debugMalloc = false

	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	bitsPerPointer  = _BitsPerPointer
	bitsMask        = _BitsMask
	pointersPerByte = _PointersPerByte
	maxGCMask       = _MaxGCMask
	bitsDead        = _BitsDead
	bitsPointer     = _BitsPointer
	bitsScalar      = _BitsScalar

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

// Page number (address>>pageShift)
type pageID uintptr

// base address for all 0-byte allocations
var zerobase uintptr

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
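// The flags argument is a bitmask of flagNoScan and flagNoZero; see
// newobject, newarray, and rawmem below for the typical callers.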
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}
	size0 := size

	if flags&flagNoScan == 0 && typ == nil {
		gothrow("malloc missing type")
	}

	// This function must be atomic wrt GC, but for performance reasons
	// we don't acquirem/releasem on fast path. The code below does not have
	// split stack checks, so it can't be preempted by GC.
	// Functions like roundup/add are inlined. And onM/racemalloc are nosplit.
	// If debugMalloc = true, these assumptions are checked below.
	if debugMalloc {
		mp := acquirem()
		if mp.mallocing != 0 {
			gothrow("malloc deadlock")
		}
		mp.mallocing = 1
		if mp.curg != nil {
			mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
		}
	}

	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which gives at most 2x worst-case memory
			// wastage (when all but one of the subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
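			//
			// An illustrative example (hypothetical sizes): three consecutive
			// noscan allocations of 2, 4, and 8 bytes can all be served from a
			// single 16-byte tiny block, which is freed only once all three
			// objects become unreachable.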
			tinysize := uintptr(c.tinysize)
			if size <= tinysize {
				tiny := unsafe.Pointer(c.tiny)
				// Align tiny pointer for required (conservative) alignment.
				if size&7 == 0 {
					tiny = roundup(tiny, 8)
				} else if size&3 == 0 {
					tiny = roundup(tiny, 4)
				} else if size&1 == 0 {
					tiny = roundup(tiny, 2)
				}
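				// For example, a 12-byte request gets 4-byte alignment
				// (12&7 != 0 but 12&3 == 0), and a 10-byte request gets
				// 2-byte alignment.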
				size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny)))
				if size1 <= tinysize {
					// The object fits into existing tiny block.
					x = tiny
					c.tiny = (*byte)(add(x, size))
					c.tinysize -= uintptr(size1)
					c.local_tinyallocs++
					if debugMalloc {
						mp := acquirem()
						if mp.mallocing == 0 {
							gothrow("bad malloc")
						}
						mp.mallocing = 0
						if mp.curg != nil {
							mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
						}
						// Note: one releasem for the acquirem just above.
						// The other for the acquirem at start of malloc.
						releasem(mp)
						releasem(mp)
					}
					return x
				}
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v == nil {
				onM(func() {
					mCache_Refill(c, tinySizeClass)
				})
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.next
			s.ref++
			//TODO: prefetch v.next
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if maxTinySize-size > tinysize {
				c.tiny = (*byte)(add(x, size))
				c.tinysize = uintptr(maxTinySize - size)
			}
			size = maxTinySize
		} else {
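			// Look up the size class for a small (non-tiny or pointer-bearing)
			// object: size_to_class8 covers sizes up to 1024-8 bytes at 8-byte
			// granularity, size_to_class128 covers the rest of the small sizes
			// at 128-byte granularity. The allocation is then rounded up to
			// that class's size.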
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v == nil {
				onM(func() {
					mCache_Refill(c, int32(sizeclass))
				})
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.next
			s.ref++
			//TODO: prefetch
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.next = nil
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += intptr(size)
	} else {
		var s *mspan
		onM(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan.
		goto marked
	}

	// If allocating a defer+arg block, now that we've picked a malloc size
	// large enough to hold everything, cut the "asked for" size down to
	// just the defer header, so that the GC bitmap will record the arg block
	// as containing nothing at all (as if it were unused space at the end of
	// a malloc block caused by size rounding).
	// The defer arg areas are scanned as part of scanstack.
	if typ == deferType {
		size0 = unsafe.Sizeof(_defer{})
	}

	// From here till the marked label we mark the object as allocated
	// and store type info in the GC bitmap.
	{
		arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
		off := (uintptr(x) - arena_start) / ptrSize
		xbits := (*uint8)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
		shift := (off % wordsPerBitmapByte) * gcBits
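		// The heap bitmap stores gcBits bits per heap word and grows down
		// from arena_start: the byte holding this object's bits is at
		// arena_start - off/wordsPerBitmapByte - 1, and its bits start at
		// shift within that byte.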
		if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary {
			println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask))
			gothrow("bad bits in markallocated")
		}

		var ti, te uintptr
		var ptrmask *uint8
		if size == ptrSize {
			// It's one word and it has pointers, so it must be a pointer.
			*xbits |= (bitsPointer << 2) << shift
			goto marked
		}
		if typ.kind&kindGCProg != 0 {
			nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize
			masksize := nptr
			if masksize%2 != 0 {
				masksize *= 2 // repeated
			}
			masksize = masksize * pointersPerByte / 8 // 4 bits per word
			masksize++                                // unroll flag in the beginning
			if masksize > maxGCMask && typ.gc[1] != 0 {
				// If the mask is too large, unroll the program directly
				// into the GC bitmap. It's 7 times slower than copying
				// from the pre-unrolled mask, but saves 1/16 of the type's
				// size in memory for the mask.
				mp := acquirem()
				mp.ptrarg[0] = x
				mp.ptrarg[1] = unsafe.Pointer(typ)
				mp.scalararg[0] = uintptr(size)
				mp.scalararg[1] = uintptr(size0)
				onM(unrollgcproginplace_m)
				releasem(mp)
				goto marked
			}
			ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
			// Check whether the program is already unrolled.
			if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
				mp := acquirem()
				mp.ptrarg[0] = unsafe.Pointer(typ)
				onM(unrollgcprog_m)
				releasem(mp)
			}
			ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
		} else {
			ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask
		}
		if size == 2*ptrSize {
			*xbits = *ptrmask | bitBoundary
			goto marked
		}
		te = uintptr(typ.size) / ptrSize
		// If the type occupies an odd number of words, its mask is repeated.
		if te%2 == 0 {
			te /= 2
		}
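		// After this, te is the length of the unrolled mask in bytes: an even
		// number of words packs two words per mask byte, while an odd number
		// of words uses the doubled ("repeated") mask noted above.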
		// Copy pointer bitmask into the bitmap.
		for i := uintptr(0); i < size0; i += 2 * ptrSize {
			v := *(*uint8)(add(unsafe.Pointer(ptrmask), ti))
			ti++
			if ti == te {
				ti = 0
			}
			if i == 0 {
				v |= bitBoundary
			}
			if i+ptrSize == size0 {
				v &^= uint8(bitPtrMask << 4)
			}

			*xbits = v
			xbits = (*byte)(add(unsafe.Pointer(xbits), ^uintptr(0)))
		}
		if size0%(2*ptrSize) == 0 && size0 < size {
			// Mark the word after the last object's word as bitsDead.
			*xbits = bitsDead << 2
		}
	}
marked:
	if raceenabled {
		racemalloc(x, size)
	}

	if debugMalloc {
		mp := acquirem()
		if mp.mallocing == 0 {
			gothrow("bad malloc")
		}
		mp.mallocing = 0
		if mp.curg != nil {
			mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
		}
		// Note: one releasem for the acquirem just above.
		// The other for the acquirem at start of malloc.
		releasem(mp)
		releasem(mp)
	}

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if memstats.heap_alloc >= memstats.next_gc {
		gogc(0)
	}

	return x
}

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(uintptr(typ.size), typ, flags)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
		panic("runtime: allocation size out of range")
	}
	return mallocgc(uintptr(typ.size)*n, typ, flags)
}

// rawmem returns a chunk of pointerless memory. It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}

// round size up to next size class
func goroundupsize(size uintptr) uintptr {
	if size < maxSmallSize {
		if size <= 1024-8 {
			return uintptr(class_to_size[size_to_class8[(size+7)>>3]])
		}
		return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]])
	}
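	// For sizes of at least maxSmallSize, round up to a whole number of
	// pages; size+pageSize can overflow for huge requests, in which case
	// return size unrounded.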
	if size+pageSize < size {
		return size
	}
	return (size + pageSize - 1) &^ pageMask
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := mp.mcache
	rate := MemProfileRate
	if size < uintptr(rate) {
		// pick next profile time
		// If you change this, also change allocmcache.
		if rate > 0x3fffffff { // make 2*rate not overflow
			rate = 0x3fffffff
		}
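		// The next sample point is drawn roughly uniformly from [0, 2*rate),
		// so allocations are sampled on average about every rate bytes.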
		next := int32(fastrand1()) % (2 * int32(rate))
		// Subtract the "remainder" of the current allocation.
		// Otherwise objects that are close in size to the sampling rate
		// will be under-sampled, because we consistently discard this remainder.
		next -= (int32(size) - c.next_sample)
		if next < 0 {
			next = 0
		}
		c.next_sample = next
	}

	mProf_Malloc(x, size)
}

// force = 1 - do GC regardless of current heap usage
// force = 2 - do GC and eager sweep
func gogc(force int32) {
	// The gc is turned off (via enablegc) until the bootstrap has completed.
	// Also, malloc gets called in the guts of a number of libraries that might be
	// holding locks. To avoid deadlocks during stoptheworld, don't bother
	// trying to run gc while holding a lock. The next mallocgc without a lock
	// will do the gc instead.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	semacquire(&worldsema, false)

	if force == 0 && memstats.heap_alloc < memstats.next_gc {
		// typically threads which lost the race to grab
		// worldsema exit here when gc is done.
		semrelease(&worldsema)
		return
	}

	// Ok, we're doing it!  Stop everybody else
	startTime := nanotime()
	mp = acquirem()
	mp.gcing = 1
	releasem(mp)
	onM(stoptheworld)
	if mp != acquirem() {
		gothrow("gogc: rescheduled")
	}

	clearpools()

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	n := 1
	if debug.gctrace > 1 {
		n = 2
	}
	for i := 0; i < n; i++ {
		if i > 0 {
			startTime = nanotime()
		}
		// switch to g0, call gc, then switch back
		mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
		mp.scalararg[1] = uintptr(startTime >> 32)   // high 32 bits
		if force >= 2 {
			mp.scalararg[2] = 1 // eagersweep
		} else {
			mp.scalararg[2] = 0
		}
		onM(gc_m)
	}

	// all done
	mp.gcing = 0
	semrelease(&worldsema)
	onM(starttheworld)
	releasem(mp)
	mp = nil

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}

// GC runs a garbage collection.
func GC() {
	gogc(2)
}

// linker-provided
var noptrdata struct{}
var enoptrbss struct{}

// SetFinalizer sets the finalizer associated with x to f.
// When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// f(x) in a separate goroutine. This makes x reachable again, but
// now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that x is unreachable, it will free x.
//
// SetFinalizer(x, nil) clears any finalizer associated with x.
//
// The argument x must be a pointer to an object allocated by
// calling new or by taking the address of a composite literal.
// The argument f must be a function that takes a single argument
// to which x's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer aborts the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
// The finalizer for x is scheduled to run at some arbitrary time after
// x becomes unreachable.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an os.File object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
// It is not guaranteed that a finalizer will run if the size of *x is
// zero bytes.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
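//
// A minimal illustrative sketch (res, acquire, and release are placeholders,
// not part of the runtime):
//
//	type res struct{ handle uintptr }
//	r := &res{handle: acquire()}
//	runtime.SetFinalizer(r, func(r *res) { release(r.handle) })
//	...
//	runtime.SetFinalizer(r, nil) // clear the finalizer again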
func SetFinalizer(obj interface{}, finalizer interface{}) {
	e := (*eface)(unsafe.Pointer(&obj))
	etyp := e._type
	if etyp == nil {
		gothrow("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.kind&kindMask != kindPtr {
		gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.elem == nil {
		gothrow("nil elem type!")
	}

	// find the containing object
	_, base, _ := findObject(e.data)

	if base == nil {
		// 0-length objects are okay.
		if e.data == unsafe.Pointer(&zerobase) {
			return
		}

		// Global initializers might be linker-allocated.
		//	var Foo = &Object{}
		//	func main() {
		//		runtime.SetFinalizer(Foo, nil)
		//	}
		// The segments are, in order: text, rodata, noptrdata, data, bss, noptrbss.
		if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
			return
		}
		gothrow("runtime.SetFinalizer: pointer not in allocated block")
	}

	if e.data != base {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from a tiny alloc (see mallocgc for details).
		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
			gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}

	f := (*eface)(unsafe.Pointer(&finalizer))
	ftyp := f._type
	if ftyp == nil {
		// switch to M stack and remove finalizer
		onM(func() {
			removefinalizer(e.data)
		})
		return
	}

	if ftyp.kind&kindMask != kindFunc {
		gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	ins := *(*[]*_type)(unsafe.Pointer(&ft.in))
	if ft.dotdotdot || len(ins) != 1 {
		gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
	}
	fint := ins[0]
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.kind&kindMask == kindPtr:
		if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.kind&kindMask == kindInterface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.mhdr) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if _, ok := assertE2I2(ityp, obj); ok {
			goto okarg
		}
	}
	gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) {
		nret = round(nret, uintptr(t.align)) + uintptr(t.size)
	}
	nret = round(nret, ptrSize)

	// make sure we have a finalizer goroutine
	createfing()

	onM(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			gothrow("runtime.SetFinalizer: finalizer already set")
		}
	})
}

// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// Look up pointer v in the heap. Return the span containing the object,
// the start of the object, and the size of the object. If the object
// does not exist, return nil, nil, 0.
func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
	c := gomcache()
	c.local_nlookup++
	if ptrSize == 4 && c.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(c)
		unlock(&mheap_.lock)
	}

	// find span
	arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
	arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
		return
	}
	p := uintptr(v) >> pageShift
	q := p - arena_start>>pageShift
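	// mheap_.spans maps each page index within the arena to the span that
	// owns that page, so index it with v's page number relative to arena_start.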
	s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
	if s == nil {
		return
	}
	x = unsafe.Pointer(uintptr(s.start) << pageShift)

	if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
		s = nil
		x = nil
		return
	}

	n = uintptr(s.elemsize)
	if s.sizeclass != 0 {
		x = add(x, (uintptr(v)-uintptr(x))/n*n)
	}
	return
}

var fingCreate uint32

func createfing() {
	// start the finalizer goroutine exactly once
	if fingCreate == 0 && cas(&fingCreate, 0, 1) {
		go runfinq()
	}
}

// This is the goroutine that runs all of the finalizers.
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
	)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gp := getg()
			fing = gp
			fingwait = true
			gp.issystem = true
			goparkunlock(&finlock, "finalizer wait")
			gp.issystem = false
			continue
		}
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := int32(0); i < fb.cnt; i++ {
				f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{})))

				framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret)
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC:
					// all not-yet-finalized objects are stored in finq.
					// If we did not mark it as FlagNoScan,
					// the last finalized object would not be collected.
					frame = mallocgc(framesz, nil, flagNoScan)
					framecap = framesz
				}

				if f.fint == nil {
					gothrow("missing type in runfinq")
				}
				switch f.fint.kind & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(frame) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(frame)._type = &f.ot.typ
					(*eface)(frame).data = f.arg
					if len(ityp.mhdr) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						*(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
					}
				default:
					gothrow("bad kind in runfinq")
				}
				reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))

				// drop finalizer queue references to finalized object
				f.fn = nil
				f.arg = nil
				f.ot = nil
			}
			fb.cnt = 0
			next := fb.next
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}

var persistent struct {
	lock mutex
	pos  unsafe.Pointer
	end  unsafe.Pointer
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
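// Requests of maxBlock bytes or more go straight to sysAlloc; smaller
// requests are carved out of shared chunk-sized blocks under persistent.lock.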
func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if align != 0 {
		if align&(align-1) != 0 {
			gothrow("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			gothrow("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, stat)
	}

	lock(&persistent.lock)
	persistent.pos = roundup(persistent.pos, align)
	if uintptr(persistent.pos)+size > uintptr(persistent.end) {
		persistent.pos = sysAlloc(chunk, &memstats.other_sys)
		if persistent.pos == nil {
			unlock(&persistent.lock)
			gothrow("runtime: cannot allocate memory")
		}
		persistent.end = add(persistent.pos, chunk)
	}
	p := persistent.pos
	persistent.pos = add(persistent.pos, size)
	unlock(&persistent.lock)

	if stat != &memstats.other_sys {
		xadd64(stat, int64(size))
		xadd64(&memstats.other_sys, -int64(size))
	}
	return p
}