// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
//	TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
//
// This code was written with an eye toward translating to Go
// in the future. Methods have the form Type_Method(Type *t, ...).

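// To make the fast path above concrete, here is a minimal sketch (in
// simplified Go; the names are the ones used later in this file, but the
// control flow is heavily condensed and only covers small sizes) of how a
// small allocation walks the MCache -> MCentral -> MHeap hierarchy:
//
//	sizeclass := size_to_class8[(size+7)>>3] // round up to a size class
//	s := c.alloc[sizeclass]                  // per-P cached span (MCache)
//	v := s.freelist
//	if v.ptr() == nil {
//		mCache_Refill(c, int32(sizeclass)) // pull objects from MCentral,
//		                                   // which may grow from the MHeap
//		s = c.alloc[sizeclass]
//		v = s.freelist
//	}
//	s.freelist = v.ptr().next // pop one object; no lock on this path
//
// Only the refill step touches the MCentral lock, and only MCentral
// touches the heap lock, which is what keeps the common allocation
// lock-free.
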
package runtime

import "unsafe"

const (
	debugMalloc = false

	flagNoScan = _FlagNoScan
	flagNoZero = _FlagNoZero

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep
)

const (
	_PageShift = 13
	_PageSize  = 1 << _PageShift
	_PageMask  = _PageSize - 1
)

const (
	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.go produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	_NumSizeClasses = 67

	// Tunable constants.
	_MaxSmallSize = 32 << 10

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - ptrSize/4*goos_windows - 1*goos_plan9
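
	// A quick check of that formula against the table above (worked
	// arithmetic only): ptrSize is 8 on 64-bit and 4 on 32-bit targets,
	// and the goos_* constants are 1 on the matching OS and 0 elsewhere, so
	//   linux/amd64:   4 - 8/4*0 - 1*0 = 4
	//   windows/386:   4 - 4/4*1 - 1*0 = 3
	//   windows/amd64: 4 - 8/4*1 - 1*0 = 2
	//   plan9/386:     4 - 4/4*0 - 1*1 = 3
	// which matches the NumStackOrders column.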

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*goos_windows)*35 + (_64bit*(1-goos_windows)*(1-goos_darwin*goarch_arm64))*39 + goos_darwin*goarch_arm64*31 + (1-_64bit)*32
	_MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
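
	// Evaluating that expression for a few targets (worked arithmetic,
	// using the per-platform goos_*/goarch_* values):
	//   windows/amd64: 1*1*35 + 0 + 0 + 0 = 35 -> 32GB arena
	//   linux/amd64:   0 + 1*1*1*39 + 0 + 0 = 39 -> 512GB arena
	//   darwin/arm64:  0 + 0 + 1*1*31 + 0 = 31 -> 2GB arena
	//   linux/386:     0 + 0 + 0 + 1*32 = 32 -> full 32-bit address space
	// and _MaxMem is then one byte short of 1<<_MHeapMap_TotalBits.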

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32
)

// Page number (address>>pageShift)
type pageID uintptr

const _MaxArena32 = 2 << 30

// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns the memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.

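// As an illustration of how these helpers combine (a minimal sketch, not
// runtime policy; the real sizes and flow live in mallocinit and
// mHeap_SysAlloc below), the usual pattern is to reserve a large region
// once and then commit pieces of it on demand:
//
//	var reserved bool
//	base := sysReserve(nil, 1<<30, &reserved) // reserve 1GB of address space
//	if base == nil {
//		throw("out of address space")
//	}
//	// ... later, when the first chunk is actually needed:
//	sysMap(base, 1<<20, reserved, &memstats.heap_sys) // commit the first 1MB
//
// sysFree hands memory back unconditionally, while sysUnused/sysUsed only
// advise the OS about an already committed range.
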
func mallocinit() {
	initSizes()

	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if ptrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (ptrSize * 8 / 4)
		spansSize = arenaSize / _PageSize * ptrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}

	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.

		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}

		for _, arenaSize := range arenaSizes {
			bitmapSize = _MaxArena32 / (ptrSize * 8 / 4)
			spansSize = _MaxArena32 / _PageSize * ptrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * ptrSize
			}
			spansSize = round(spansSize, _PageSize)

			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(firstmoduledata.end+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}

	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
	mheap_.bitmap = p1 + spansSize
	mheap_.arena_start = p1 + (spansSize + bitmapSize)
	mheap_.arena_used = mheap_.arena_start
	mheap_.arena_end = p + pSize
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mHeap_Init(&mheap_, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}

// sysReserveHigh reserves space somewhere high in the address space.
// sysReserve doesn't actually reserve the full amount requested on
// 64-bit systems, because of problems with ulimit. Instead it checks
// that it can get the first 64 kB and assumes it can grab the rest as
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
	if ptrSize == 4 {
		return sysReserve(nil, n, reserved)
	}

	for i := 0; i <= 0x7f; i++ {
		p := uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
		*reserved = false
		p = uintptr(sysReserve(unsafe.Pointer(p), n, reserved))
		if p != 0 {
			return unsafe.Pointer(p)
		}
	}

	return sysReserve(nil, n, reserved)
}

func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size
		if new_end <= h.arena_start+_MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if p+p_size <= h.arena_start+_MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-uintptr(p) & (_PageSize - 1))
				mHeap_MapBits(h, used)
				mHeap_MapSpans(h, used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				var stat uint64
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}

	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		mHeap_MapBits(h, p+n)
		mHeap_MapSpans(h, p+n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if uintptr(p)&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}

	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start >= _MaxArena32 {
		return nil
	}

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || uintptr(p)+p_size-h.arena_start >= _MaxArena32 {
		print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if uintptr(p)+n > h.arena_used {
		mHeap_MapBits(h, p+n)
		mHeap_MapSpans(h, p+n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if uintptr(p)&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}

// base address for all 0-byte allocations
var zerobase uintptr
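
// For example (an observation about this implementation, not a documented
// guarantee): every zero-byte allocation resolves to this single address, so
//
//	a := new(struct{})
//	b := new([0]int)
//
// may yield pointers with the same underlying address, which is why code
// must not assume that distinct zero-size allocations compare unequal.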

const (
	// flags to malloc
	_FlagNoScan = 1 << 0 // GC doesn't have to scan object
	_FlagNoZero = 1 << 1 // don't zero memory
)

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if flags&flagNoScan == 0 && typ == nil {
		throw("malloc missing type")
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var s *mspan
	var x unsafe.Pointer
	if size <= maxSmallSize {
		if flags&flagNoScan != 0 && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize) is tunable.
			// The current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one of the subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case is an 8x win regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
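			//
			// A worked example of the packing (illustrative, assuming no
			// partially used tiny block is already cached): a 12-byte noscan
			// allocation takes a fresh 16-byte block and leaves tinyoffset = 12;
			// a following 4-byte allocation is 4-byte aligned, fits at offset 12,
			// and shares the same block, so the two objects together cost a
			// single 16-byte allocation.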
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != nil {
				// The object fits into existing tiny block.
				x = add(c.tiny, off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			s = c.alloc[tinySizeClass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, tinySizeClass)
				})
				shouldhelpgc = true
				s = c.alloc[tinySizeClass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers best performance, see change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on the amount of remaining free space.
			if size < c.tinyoffset || c.tiny == nil {
				c.tiny = x
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass int8
			if size <= 1024-8 {
				sizeclass = size_to_class8[(size+7)>>3]
			} else {
				sizeclass = size_to_class128[(size-1024+127)>>7]
			}
			size = uintptr(class_to_size[sizeclass])
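			// A worked example (using the size tables that msize.go computes
			// at the time of writing, so the exact class is illustrative): a
			// 24-byte request with pointers lands here, (24+7)>>3 = 3 indexes
			// size_to_class8, and the resulting class rounds the allocation
			// up to a 32-byte object, wasting 8 bytes to size-class rounding.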
			s = c.alloc[sizeclass]
			v := s.freelist
			if v.ptr() == nil {
				systemstack(func() {
					mCache_Refill(c, int32(sizeclass))
				})
				shouldhelpgc = true
				s = c.alloc[sizeclass]
				v = s.freelist
			}
			s.freelist = v.ptr().next
			s.ref++
			// prefetchnta offers best performance, see change list message.
			prefetchnta(uintptr(v.ptr().next))
			x = unsafe.Pointer(v)
			if flags&flagNoZero == 0 {
				v.ptr().next = 0
				if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
					memclr(unsafe.Pointer(v), size)
				}
			}
		}
		c.local_cachealloc += size
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, uint32(flags))
		})
		x = unsafe.Pointer(uintptr(s.start << pageShift))
		size = uintptr(s.elemsize)
	}

	if flags&flagNoScan != 0 {
		// All objects are pre-marked as noscan. Nothing to do.
	} else {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				c.local_scan += dataSize - typ.size + typ.ptrdata
			}
		} else {
			c.local_scan += typ.ptrdata
		}

		// Ensure that the stores above that initialize x to
		// type-safe memory and set the heap bits occur before
		// the caller can make x observable to the garbage
		// collector. Otherwise, on weakly ordered machines,
		// the garbage collector could follow a pointer to x,
		// but see uninitialized memory or stale heap bits.
		publicationBarrier()
	}

	// GCmarktermination allocates black.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase == _GCmarktermination || gcBlackenPromptly {
		systemstack(func() {
			gcmarknewobject_m(uintptr(x), size)
		})
	}

	if raceenabled {
		racemalloc(x, size)
	}
	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	return x
}

func largeAlloc(size uintptr, flag uint32) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
	if s == nil {
		throw("out of memory")
	}
	s.limit = uintptr(s.start)<<_PageShift + size
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}
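
// A worked page-count example (arithmetic only; _PageShift = 13, so pages
// are 8192 bytes): a 100 KB request is 102400 bytes, 102400 >> 13 = 12 full
// pages with 4096 bytes left over, so size&_PageMask != 0 and the span is
// rounded up to 13 pages (104 KB).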

// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
	flags := uint32(0)
	if typ.kind&kindNoPointers != 0 {
		flags |= flagNoScan
	}
	return mallocgc(uintptr(typ.size), typ, flags)
}
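
// For example, a plain allocation such as
//
//	p := new(bytes.Buffer) // or: p := &bytes.Buffer{}
//
// compiles into a call to newobject with the *_type descriptor for
// bytes.Buffer (when the value escapes to the heap), and because Buffer
// contains pointers the flagNoScan bit is left clear so the GC will scan
// the object.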
777
Russ Cox7a524a12014-12-22 13:27:53 -0500778//go:linkname reflect_unsafe_New reflect.unsafe_New
779func reflect_unsafe_New(typ *_type) unsafe.Pointer {
780 return newobject(typ)
781}
782
Keith Randall4aa50432014-07-30 09:01:52 -0700783// implementation of make builtin for slices
784func newarray(typ *_type, n uintptr) unsafe.Pointer {
Austin Clements489ff752014-11-03 13:26:46 -0500785 flags := uint32(0)
Keith Randall4aa50432014-07-30 09:01:52 -0700786 if typ.kind&kindNoPointers != 0 {
787 flags |= flagNoScan
788 }
Russ Cox1e2d2f02014-11-11 17:05:02 -0500789 if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
Keith Randall4aa50432014-07-30 09:01:52 -0700790 panic("runtime: allocation size out of range")
791 }
Russ Coxbffb0592014-09-09 01:08:34 -0400792 return mallocgc(uintptr(typ.size)*n, typ, flags)
Keith Randall4aa50432014-07-30 09:01:52 -0700793}
794
Russ Cox7a524a12014-12-22 13:27:53 -0500795//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
796func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
797 return newarray(typ, n)
798}
799
Keith Randallcc9ec522014-07-31 12:43:40 -0700800// rawmem returns a chunk of pointerless memory. It is
801// not zeroed.
802func rawmem(size uintptr) unsafe.Pointer {
Russ Coxbffb0592014-09-09 01:08:34 -0400803 return mallocgc(size, nil, flagNoScan|flagNoZero)
Keith Randallcc9ec522014-07-31 12:43:40 -0700804}
805
Keith Randall4aa50432014-07-30 09:01:52 -0700806func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
Raul Silvera27ee7192015-09-14 14:03:45 -0700807 mp.mcache.next_sample = nextSample()
808 mProf_Malloc(x, size)
809}
810
811// nextSample returns the next sampling point for heap profiling.
812// It produces a random variable with a geometric distribution and
813// mean MemProfileRate. This is done by generating a uniformly
814// distributed random number and applying the cumulative distribution
815// function for an exponential.
816func nextSample() int32 {
David du Colombier31430bd2015-10-28 06:44:26 +0100817 if GOOS == "plan9" {
818 // Plan 9 doesn't support floating point in note handler.
819 if g := getg(); g == g.m.gsignal {
820 return nextSampleNoFP()
821 }
822 }
823
Raul Silvera27ee7192015-09-14 14:03:45 -0700824 period := MemProfileRate
825
826 // make nextSample not overflow. Maximum possible step is
827 // -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
828 switch {
829 case period > 0x7000000:
830 period = 0x7000000
831 case period == 0:
832 return 0
Keith Randall4aa50432014-07-30 09:01:52 -0700833 }
Russ Cox548d0802014-09-01 18:51:12 -0400834
Raul Silvera27ee7192015-09-14 14:03:45 -0700835 // Let m be the sample rate,
836 // the probability distribution function is m*exp(-mx), so the CDF is
837 // p = 1 - exp(-mx), so
838 // q = 1 - p == exp(-mx)
839 // log_e(q) = -mx
840 // -log_e(q)/m = x
841 // x = -log_e(q) * period
842 // x = log_2(q) * (-log_e(2)) * period ; Using log_2 for efficiency
843 const randomBitCount = 26
844 q := uint32(fastrand1())%(1<<randomBitCount) + 1
845 qlog := fastlog2(float64(q)) - randomBitCount
846 if qlog > 0 {
847 qlog = 0
848 }
849 const minusLog2 = -0.6931471805599453 // -ln(2)
850 return int32(qlog*(minusLog2*float64(period))) + 1
Keith Randall4aa50432014-07-30 09:01:52 -0700851}
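
// A worked example of that computation (illustrative numbers; the default
// MemProfileRate is 512*1024 bytes): with period = 524288 and a draw of
// q = 1<<25, qlog = log2(2^25) - 26 = -1, so the next sample point is
// int32(-1 * (-0.693147 * 524288)) + 1, about 363409 bytes -- i.e.
// -ln(0.5) * period, exactly the exponential inter-arrival distance one
// would expect for a median draw.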

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand1()) % (2 * rate))
	}
	return 0
}

type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}
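
// For example, the debug.sbrk path in mallocgc above uses this as a trivial
// bump allocator for the whole heap:
//
//	persistentalloc(size, align, &memstats.other_sys)
//
// The returned memory is zeroed (it comes straight from sysAlloc), is never
// freed, and when sysStat is &memstats.other_sys the explicit re-accounting
// at the end of persistentalloc1 is skipped.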