| // Copyright 2009 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| // See malloc.h for overview. |
| // |
| // TODO(rsc): double-check stats. |
| |
| package runtime |
| #include "runtime.h" |
| #include "stack.h" |
| #include "malloc.h" |
| #include "defs.h" |
| #include "type.h" |
| |
| MHeap runtime·mheap; |
| extern MStats mstats; // defined in extern.go |
| |
| extern volatile int32 runtime·MemProfileRate; |
| |
| // Same algorithm as in chan.c, but a different |
| // instance of the static uint32 x. |
| // Not protected by a lock: it is harmless for |
| // multiple threads to observe the same random number. |
| static uint32 |
| fastrand1(void) |
| { |
| static uint32 x = 0x49f6428aUL; |
| |
| x += x; |
| if(x & 0x80000000L) |
| x ^= 0x88888eefUL; |
| return x; |
| } |
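| |
| // For example, one step starting from the seed: |
| //	x = 0x49f6428a |
| //	x += x          // x = 0x93ec8514, high bit set |
| //	x ^= 0x88888eef // x = 0x1b640bfb |
| // Each step shifts x left by one and conditionally mixes the |
| // constant back in: a simple shift-and-xor generator. |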
| |
| // Allocate an object of at least size bytes. |
| // Small objects are allocated from the per-thread cache's free lists. |
| // Large objects (> 32 kB) are allocated straight from the heap. |
| void* |
| runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) |
| { |
| int32 sizeclass, rate; |
| MCache *c; |
| uintptr npages; |
| MSpan *s; |
| void *v; |
| |
| if(runtime·gcwaiting && g != m->g0 && m->locks == 0) |
| runtime·gosched(); |
| if(m->mallocing) |
| runtime·throw("malloc/free - deadlock"); |
| m->mallocing = 1; |
| if(size == 0) |
| size = 1; |
| |
| mstats.nmalloc++; |
| if(size <= MaxSmallSize) { |
| // Allocate from mcache free lists. |
| sizeclass = runtime·SizeToClass(size); |
| size = runtime·class_to_size[sizeclass]; |
| c = m->mcache; |
| v = runtime·MCache_Alloc(c, sizeclass, size, zeroed); |
| if(v == nil) |
| runtime·throw("out of memory"); |
| mstats.alloc += size; |
| mstats.total_alloc += size; |
| mstats.by_size[sizeclass].nmalloc++; |
| } else { |
| // TODO(rsc): Report tracebacks for very large allocations. |
| |
| // Allocate directly from heap. |
| npages = size >> PageShift; |
| if((size & PageMask) != 0) |
| npages++; |
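| // For example, assuming the 4 kB pages defined in malloc.h |
| // (PageShift = 12), a 40000-byte request gives npages = 9, |
| // then 40000 & 4095 != 0 bumps it to 10, so the allocation |
| // is rounded up to 10<<12 = 40960 bytes below. |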
| s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1); |
| if(s == nil) |
| runtime·throw("out of memory"); |
| size = npages<<PageShift; |
| mstats.alloc += size; |
| mstats.total_alloc += size; |
| v = (void*)(s->start << PageShift); |
| |
| // Set up the span for mark-sweep garbage collection. |
| runtime·markspan(v, 0, 0, true); |
| } |
| if(!(flag & FlagNoGC)) |
| runtime·markallocated(v, size, (flag&FlagNoPointers) != 0); |
| |
| m->mallocing = 0; |
| |
| if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) { |
| if(size >= rate) |
| goto profile; |
| if(m->mcache->next_sample > size) |
| m->mcache->next_sample -= size; |
| else { |
| // Pick the next sample point: a random byte count |
| // uniform in [0, 2*rate), so samples average one |
| // per rate bytes allocated. |
| if(rate > 0x3fffffff) // make 2*rate not overflow |
| rate = 0x3fffffff; |
| m->mcache->next_sample = fastrand1() % (2*rate); |
| profile: |
| runtime·setblockspecial(v); |
| runtime·MProf_Malloc(v, size); |
| } |
| } |
| |
| if(dogc && mstats.heap_alloc >= mstats.next_gc) |
| runtime·gc(0); |
| return v; |
| } |
| |
| void* |
| runtime·malloc(uintptr size) |
| { |
| return runtime·mallocgc(size, 0, 0, 1); |
| } |
| |
| // Free the object whose base pointer is v. |
| void |
| runtime·free(void *v) |
| { |
| int32 sizeclass; |
| MSpan *s; |
| MCache *c; |
| uint32 prof; |
| uintptr size; |
| |
| if(v == nil) |
| return; |
| |
| // If you change this also change mgc0.c:/^sweepspan, |
| // which has a copy of the guts of free. |
| |
| if(m->mallocing) |
| runtime·throw("malloc/free - deadlock"); |
| m->mallocing = 1; |
| |
| if(!runtime·mlookup(v, nil, nil, &s)) { |
| runtime·printf("free %p: not an allocated block\n", v); |
| runtime·throw("free runtime·mlookup"); |
| } |
| prof = runtime·blockspecial(v); |
| |
| // Find size class for v. |
| sizeclass = s->sizeclass; |
| if(sizeclass == 0) { |
| // Large object. |
| size = s->npages<<PageShift; |
| *(uintptr*)(s->start<<PageShift) = 1; // mark as "needs to be zeroed" |
| // Must mark v freed before calling unmarkspan and MHeap_Free: |
| // they might coalesce v into other spans and change the bitmap further. |
| runtime·markfreed(v, size); |
| runtime·unmarkspan(v, 1<<PageShift); |
| runtime·MHeap_Free(&runtime·mheap, s, 1); |
| } else { |
| // Small object. |
| c = m->mcache; |
| size = runtime·class_to_size[sizeclass]; |
| if(size > sizeof(uintptr)) |
| ((uintptr*)v)[1] = 1; // mark as "needs to be zeroed" |
| // Must mark v freed before calling MCache_Free: |
| // it might coalesce v and other blocks into a bigger span |
| // and change the bitmap further. |
| runtime·markfreed(v, size); |
| mstats.by_size[sizeclass].nfree++; |
| runtime·MCache_Free(c, v, sizeclass, size); |
| } |
| mstats.alloc -= size; |
| if(prof) |
| runtime·MProf_Free(v, size); |
| m->mallocing = 0; |
| } |
| |
| int32 |
| runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp) |
| { |
| uintptr n, i; |
| byte *p; |
| MSpan *s; |
| |
| mstats.nlookup++; |
| s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); |
| if(sp) |
| *sp = s; |
| if(s == nil) { |
| runtime·checkfreed(v, 1); |
| if(base) |
| *base = nil; |
| if(size) |
| *size = 0; |
| return 0; |
| } |
| |
| p = (byte*)((uintptr)s->start<<PageShift); |
| if(s->sizeclass == 0) { |
| // Large object. |
| if(base) |
| *base = p; |
| if(size) |
| *size = s->npages<<PageShift; |
| return 1; |
| } |
| |
| if((byte*)v >= (byte*)s->limit) { |
| // Pointers past the last block do not count as pointers into the span. |
| return 0; |
| } |
| |
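| // v falls in a small-object span: compute the block index |
| // and the block's base address. For example, if the span |
| // holds 48-byte blocks and v is 100 bytes past the span |
| // start p, then i = 100/48 = 2 and base = p + 2*48 = p + 96. |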
| n = runtime·class_to_size[s->sizeclass]; |
| i = ((byte*)v - p)/n; |
| if(base) |
| *base = p + i*n; |
| if(size) |
| *size = n; |
| |
| return 1; |
| } |
| |
| MCache* |
| runtime·allocmcache(void) |
| { |
| MCache *c; |
| |
| runtime·lock(&runtime·mheap); |
| c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc); |
| mstats.mcache_inuse = runtime·mheap.cachealloc.inuse; |
| mstats.mcache_sys = runtime·mheap.cachealloc.sys; |
| runtime·unlock(&runtime·mheap); |
| return c; |
| } |
| |
| int32 runtime·sizeof_C_MStats = sizeof(MStats); |
| |
| // Maximum arena size on a 32-bit machine: 2U<<30 = 2 GB. |
| #define MaxArena32 (2U<<30) |
| |
| void |
| runtime·mallocinit(void) |
| { |
| byte *p; |
| uintptr arena_size, bitmap_size; |
| extern byte end[]; |
| |
| runtime·InitSizes(); |
| |
| // Set up the allocation arena, a contiguous area of memory where |
| // allocated data will be found. The arena begins with a bitmap large |
| // enough to hold 4 bits per allocated word. |
| if(sizeof(void*) == 8) { |
| // On a 64-bit machine, allocate from a single contiguous reservation. |
| // 16 GB should be big enough for now. |
| // |
| // The code will work with the reservation at any address, but ask |
| // SysReserve to use 0x000000f800000000 if possible. |
| // Allocating a 16 GB region takes away 34 bits (16 GB = 2^34), and the amd64 |
| // doesn't let us choose the top 17 bits, so that leaves the 13 bits |
| // in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means |
| // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb. |
| // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and |
| // they are otherwise as far from ff (likely a common byte) as possible. |
| // Choosing 0x00 for the leading 6 bits was more arbitrary, but it |
| // is not a common ASCII code point either. Using 0x11f8 instead |
| // caused out of memory errors on OS X during thread allocations. |
| // These choices are both for debuggability and to reduce the |
| // odds of the conservative garbage collector not collecting memory |
| // because some non-pointer block of memory had a bit pattern |
| // that matched a memory address. |
| // |
| // Actually we reserve 17 GB (because the bitmap ends up being 1 GB) |
| // but it hardly matters: fc is not valid UTF-8 either, and we have to |
| // allocate 15 GB before we get that far. |
| arena_size = 16LL<<30; |
| bitmap_size = arena_size / (sizeof(void*)*8/4); |
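| // With 8-byte words and 4 bitmap bits per word, the bitmap |
| // is arena_size/16: 16 GB/16 = 1 GB, which is why the |
| // comment above says the reservation totals 17 GB. |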
| p = runtime·SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size); |
| if(p == nil) |
| runtime·throw("runtime: cannot reserve arena virtual address space"); |
| } else { |
| // On a 32-bit machine, we can't typically get away |
| // with a giant virtual address space reservation. |
| // Instead we map the memory information bitmap |
| // immediately after the data segment, large enough |
| // to handle another 2GB of mappings (256 MB), |
| // along with a reservation for another 512 MB of memory. |
| // When that gets used up, we'll start asking the kernel |
| // for any memory anywhere and hope it's in the 2GB |
| // following the bitmap (presumably the executable begins |
| // near the bottom of memory, so we'll have to use up |
| // most of memory before the kernel resorts to giving out |
| // memory before the beginning of the text segment). |
| // |
| // Alternatively we could reserve 512 MB bitmap, enough |
| // for 4GB of mappings, and then accept any memory the |
| // kernel threw at us, but normally that's a waste of 512 MB |
| // of address space, which is probably too much in a 32-bit world. |
| bitmap_size = MaxArena32 / (sizeof(void*)*8/4); |
| arena_size = 512<<20; |
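| // With 4-byte words and 4 bitmap bits per word, the bitmap |
| // is MaxArena32/8: 2 GB/8 = 256 MB, matching the figure in |
| // the comment above. |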
| |
| // SysReserve treats the address we ask for, end, as a hint, |
| // not as an absolute requirement. If we ask for the end |
| // of the data segment but the operating system requires |
| // a little more space before we can start allocating, it will |
| // give out a slightly higher pointer. That's fine. |
| // Run with what we get back. |
| p = runtime·SysReserve(end, bitmap_size + arena_size); |
| if(p == nil) |
| runtime·throw("runtime: cannot reserve arena virtual address space"); |
| } |
| if((uintptr)p & (((uintptr)1<<PageShift)-1)) |
| runtime·throw("runtime: SysReserve returned unaligned address"); |
| |
| runtime·mheap.bitmap = p; |
| runtime·mheap.arena_start = p + bitmap_size; |
| runtime·mheap.arena_used = runtime·mheap.arena_start; |
| runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size; |
| |
| // Initialize the rest of the allocator. |
| runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc); |
| m->mcache = runtime·allocmcache(); |
| |
| // See if it works. |
| runtime·free(runtime·malloc(1)); |
| } |
| |
| void* |
| runtime·MHeap_SysAlloc(MHeap *h, uintptr n) |
| { |
| byte *p; |
| |
| if(n <= h->arena_end - h->arena_used) { |
| // Keep taking from our reservation. |
| p = h->arena_used; |
| runtime·SysMap(p, n); |
| h->arena_used += n; |
| runtime·MHeap_MapBits(h); |
| return p; |
| } |
| |
| // On 64-bit, our reservation is all we have. |
| if(sizeof(void*) == 8) |
| return nil; |
| |
| // On 32-bit, once the reservation is gone we can |
| // try to get memory at a location chosen by the OS |
| // and hope that it is in the range we allocated bitmap for. |
| p = runtime·SysAlloc(n); |
| if(p == nil) |
| return nil; |
| |
| if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) { |
| runtime·printf("runtime: memory allocated by OS not in usable range"); |
| runtime·SysFree(p, n); |
| return nil; |
| } |
| |
| if(p+n > h->arena_used) { |
| h->arena_used = p+n; |
| if(h->arena_used > h->arena_end) |
| h->arena_end = h->arena_used; |
| runtime·MHeap_MapBits(h); |
| } |
| |
| return p; |
| } |
| |
| // Runtime stubs. |
| |
| void* |
| runtime·mal(uintptr n) |
| { |
| return runtime·mallocgc(n, 0, 1, 1); |
| } |
| |
| func new(n uint32) (ret *uint8) { |
| ret = runtime·mal(n); |
| } |
| |
| // Stack allocator uses malloc/free most of the time, |
| // but if we're in the middle of malloc and need stack, |
| // we have to do something else to avoid deadlock. |
| // In that case, we fall back on a fixed-size free-list |
| // allocator, assuming that inside malloc all the stack |
| // frames are small, so that all the stack allocations |
| // will be a single size, the minimum (right now, 5k). |
| static struct { |
| Lock; |
| FixAlloc; |
| } stacks; |
| |
| enum { |
| FixedStack = StackMin, |
| }; |
| |
| void* |
| runtime·stackalloc(uint32 n) |
| { |
| void *v; |
| |
| // Stackalloc must be called on scheduler stack, so that we |
| // never try to grow the stack during the code that stackalloc runs. |
| // Doing so would cause a deadlock (issue 1547). |
| if(g != m->g0) |
| runtime·throw("stackalloc not on scheduler stack"); |
| |
| if(m->mallocing || m->gcing || n == FixedStack) { |
| runtime·lock(&stacks); |
| if(stacks.size == 0) |
| runtime·FixAlloc_Init(&stacks, n, runtime·SysAlloc, nil, nil); |
| if(stacks.size != n) { |
| runtime·printf("stackalloc: in malloc, size=%D want %d", (uint64)stacks.size, n); |
| runtime·throw("stackalloc"); |
| } |
| v = runtime·FixAlloc_Alloc(&stacks); |
| mstats.stacks_inuse = stacks.inuse; |
| mstats.stacks_sys = stacks.sys; |
| runtime·unlock(&stacks); |
| return v; |
| } |
| return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0); |
| } |
| |
| void |
| runtime·stackfree(void *v, uintptr n) |
| { |
| if(m->mallocing || m->gcing || n == FixedStack) { |
| runtime·lock(&stacks); |
| runtime·FixAlloc_Free(&stacks, v); |
| mstats.stacks_inuse = stacks.inuse; |
| mstats.stacks_sys = stacks.sys; |
| runtime·unlock(&stacks); |
| return; |
| } |
| runtime·free(v); |
| } |
| |
| func Alloc(n uintptr) (p *byte) { |
| p = runtime·malloc(n); |
| } |
| |
| func Free(p *byte) { |
| runtime·free(p); |
| } |
| |
| func Lookup(p *byte) (base *byte, size uintptr) { |
| runtime·mlookup(p, &base, &size, nil); |
| } |
| |
| func GC() { |
| runtime·gc(1); |
| } |
| |
| func SetFinalizer(obj Eface, finalizer Eface) { |
| byte *base; |
| uintptr size; |
| FuncType *ft; |
| int32 i, nret; |
| Type *t; |
| |
| if(obj.type == nil) { |
| runtime·printf("runtime.SetFinalizer: first argument is nil interface\n"); |
| throw: |
| runtime·throw("runtime.SetFinalizer"); |
| } |
| if(obj.type->kind != KindPtr) { |
| runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string); |
| goto throw; |
| } |
| if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) { |
| runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n"); |
| goto throw; |
| } |
| nret = 0; |
| if(finalizer.type != nil) { |
| if(finalizer.type->kind != KindFunc) { |
| badfunc: |
| runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string); |
| goto throw; |
| } |
| ft = (FuncType*)finalizer.type; |
| if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type) |
| goto badfunc; |
| |
| // compute size needed for return parameters |
| for(i=0; i<ft->out.len; i++) { |
| t = ((Type**)ft->out.array)[i]; |
| nret = (nret + t->align - 1) & ~(t->align - 1); |
| nret += t->size; |
| } |
| nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1); |
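| // For example (hypothetical, assuming amd64 sizes and |
| // alignments): a finalizer returning (int32, *byte) needs |
| // nret = 4, rounded to 8 for the pointer's alignment, plus 8, |
| // giving 16 bytes after the final pointer-size rounding. |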
| |
| if(runtime·getfinalizer(obj.data, 0)) { |
| runtime·printf("runtime.SetFinalizer: finalizer already set"); |
| goto throw; |
| } |
| } |
| runtime·addfinalizer(obj.data, finalizer.data, nret); |
| } |
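| |
| // A minimal sketch of use from Go code (hypothetical): |
| // |
| //	f, _ := os.Open("/etc/hosts") |
| //	runtime.SetFinalizer(f, func(f *os.File) { f.Close() }) |
| // |
| // The finalizer must be a func whose single argument has the |
| // object's exact pointer type, as checked above. |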