// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of the 67 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
// TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
//
// This code was originally written in C with an eye toward translating
// it to Go; many functions therefore retain the C naming form
// typeName_Method(t *type, ...) rather than being methods on the type.
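//
// For example: a 24-byte allocation of pointer-containing data is
// rounded up to the 32-byte size class and served from the per-P
// MCache free list without locking; only when that list is empty does
// the allocator fall back to MCentral, and then to the MHeap and the
// operating system.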

package runtime

import "unsafe"

const (
	debugMalloc = false

	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

const (
	_PageShift = 13
	_PageSize  = 1 << _PageShift
	_PageMask  = _PageSize - 1
)
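
// With _PageShift = 13, _PageSize is 8192 bytes and _PageMask is 0x1fff,
// so for an address a, a>>_PageShift is its page number and a&_PageMask
// is its offset within the page.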

const (
	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2
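
	// To spell out the trick above: on a 64-bit system ^uintptr(0)>>63
	// is 1, so the expression is (1<<1)/2 = 1; on a 32-bit system the
	// shift yields 0 and the expression is (1<<0)/2 = 0.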

	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	_NumSizeClasses = 67

	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - ptrSize/4*goos_windows - 1*goos_plan9

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*goos_windows)*35 + (_64bit*(1-goos_windows)*(1-goos_darwin*goarch_arm64))*39 + goos_darwin*goarch_arm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
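
	// For example, on linux/amd64 all of goos_windows, goos_darwin, and
	// goarch_arm64 are 0 and _64bit is 1, so _MHeapMap_TotalBits is 39:
	// _MaxMem is 2^39-1 (an arena limit of 512GB) and, with
	// _PageShift = 13, _MHeapMap_Bits is 26.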

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32
)

// Page number (address>>pageShift)
type pageID uintptr

const _MaxArena32 = 2 << 30

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally; this is only used if
// an out-of-memory error has been detected midway through
// an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
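//
// An illustrative lifecycle (a sketch, not a specification): mallocinit
// calls sysReserve once for the whole arena; as the heap grows,
// mHeap_SysAlloc commits reserved pages with sysMap; idle spans may
// later be returned with sysUnused and reused after sysUsed.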

func mallocinit() {
	initSizes()

	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if ptrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (ptrSize * 8 / 4)
		spansSize = arenaSize / _PageSize * ptrSize
		spansSize = round(spansSize, _PageSize)
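		// Plugging in the amd64 numbers: arenaSize is 512GB, so
		// bitmapSize is 512GB/16 = 32GB and spansSize is
		// 512GB/8192*8 = 512MB.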
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = _MaxArena32 / (ptrSize * 8 / 4)
			spansSize = _MaxArena32 / _PageSize * ptrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * ptrSize
			}
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize
	mheap_.arena_start = p1 + (spansSize + bitmapSize)
	mheap_.arena_used = mheap_.arena_start
	mheap_.arena_end = p + pSize
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mHeap_Init(&mheap_, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}
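
// After mallocinit, the reservation is laid out as three contiguous
// regions starting at the page-rounded base p1:
//
//	p1                          spans  (spansSize bytes; one *mspan per page)
//	p1 + spansSize              bitmap (bitmapSize bytes; 4 bits per word)
//	p1 + spansSize + bitmapSize arena  (arena_start .. arena_end)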

// sysReserveHigh reserves space somewhere high in the address space.
// sysReserve doesn't actually reserve the full amount requested on
// 64-bit systems, because of problems with ulimit. Instead it checks
// that it can get the first 64 kB and assumes it can grab the rest as
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
	if ptrSize == 4 {
		return sysReserve(nil, n, reserved)
	}

	for i := 0; i <= 0x7f; i++ {
		p := uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
		*reserved = false
		p = uintptr(sysReserve(unsafe.Pointer(p), n, reserved))
		if p != 0 {
			return unsafe.Pointer(p)
		}
	}

	return sysReserve(nil, n, reserved)
}

func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size
		if new_end <= h.arena_start+_MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if p+p_size <= h.arena_start+_MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-uintptr(p) & (_PageSize - 1))
				mHeap_MapBits(h, used)
				mHeap_MapSpans(h, used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				var stat uint64
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		mHeap_MapBits(h, p+n)
		mHeap_MapSpans(h, p+n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if uintptr(p)&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start >= _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || uintptr(p)+p_size-h.arena_start >= _MaxArena32 {
		print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if uintptr(p)+n > h.arena_used {
		mHeap_MapBits(h, p+n)
		mHeap_MapSpans(h, p+n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if uintptr(p)&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr

const (
	// flags to malloc
	_FlagNoScan = 1 << 0 // GC doesn't have to scan object
	_FlagNoZero = 1 << 1 // don't zero memory
)

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if flags&flagNoScan == 0 && typ == nil {
		throw("malloc missing type")
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which relates to
			// 2x worst case memory wastage (when all but one subobject are
			// unreachable). 8 bytes would result in no wastage at all, but
			// provides fewer opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly. So when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a JSON benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
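			// For example, with off = 6 a 12-byte request (size&3 == 0)
			// rounds off up to 8, giving the object 4-byte alignment
			// within the tiny block.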
			if off+size <= maxTinySize && c.tiny != nil {
				// The object fits into existing tiny block.
				x = add(c.tiny, off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, tinySizeClass)
				})
				shouldhelpgc = true
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers best performance, see change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == nil {
				c.tiny = x
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
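			// For example, a 100-byte request looks up
			// size_to_class8[(100+7)>>3] = size_to_class8[13], which maps
			// to the 112-byte class (assuming the class tables built by
			// initSizes in msize.go).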
			size = uintptr(class_to_size[sizeclass])
			s = c.alloc[sizeclass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, int32(sizeclass))
				})
				shouldhelpgc = true
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers best performance, see change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.ptr().next = 0
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += size
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan. Nothing to do.
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				c.local_scan += dataSize - typ.size + typ.ptrdata
			}
		} else {
			c.local_scan += typ.ptrdata
		}

		// Ensure that the stores above that initialize x to
		// type-safe memory and set the heap bits occur before
		// the caller can make x observable to the garbage
		// collector. Otherwise, on weakly ordered machines,
		// the garbage collector could follow a pointer to x,
		// but see uninitialized memory or stale heap bits.
		publicationBarrier()
	}

	// GCmarktermination allocates black.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase == _GCmarktermination || gcBlackenPromptly {
		systemstack(func() {
			gcmarknewobject_m(uintptr(x), size)
		})
	}

	if raceenabled {
		racemalloc(x, size)
	}
	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}

func largeAlloc(size uintptr, flag uint32) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}
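	// For example, a 40000-byte request gives npages = 40000>>13 = 4;
	// since 40000&_PageMask != 0, npages becomes 5 (5*8192 = 40960
	// bytes). The size+_PageSize < size test above catches unsigned
	// overflow for absurdly large requests.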

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
	if s == nil {
		throw("out of memory")
	}
	s.limit = uintptr(s.start)<<_PageShift + size
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(uintptr(typ.size), typ, flags)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}

// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
		panic("runtime: allocation size out of range")
	}
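	// The n > _MaxMem/typ.size test rejects any count whose product
	// typ.size*n would overflow uintptr or exceed the arena limit
	// before the multiplication below happens.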
	return mallocgc(uintptr(typ.size)*n, typ, flags)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
	return newarray(typ, n)
}

// rawmem returns a chunk of pointerless memory. It is
// not zeroed.
func rawmem(size uintptr) unsafe.Pointer {
	return mallocgc(size, nil, flagNoScan|flagNoZero)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the inverse of the
// cumulative distribution function of an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m = 1/period be the sample rate.
	// The probability density function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; using log_2 for efficiency
	const randomBitCount = 26
	q := uint32(fastrand1())%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}
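
// A quick sanity check of nextSample (illustrative): q/2^26 is uniform
// on (0,1], so -log_e(q/2^26)*period is exponential with mean period;
// with the default MemProfileRate of 512*1024, sample points land an
// average of roughly 512 kB of allocation apart.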

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand1()) % (2 * rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}