// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "malloc.h"
#include "defs.h"
#include "type.h"

MHeap runtime·mheap;
extern MStats mstats;	// defined in extern.go

extern volatile int32 runtime·MemProfileRate;
// Same algorithm from chan.c, but a different
// instance of the static uint32 x.
// Not protected by a lock - let the threads use
// the same random number if they like.
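// Each step shifts x left one bit and, if the new high bit is set,
// XORs in the constant 0x88888eef, in the style of a linear
// feedback shift register.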
static uint32
fastrand1(void)
{
	static uint32 x = 0x49f6428aUL;

	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	return x;
}
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
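// refflag seeds the block's gc reference word, dogc permits triggering
// a collection once heap_alloc reaches next_gc, and zeroed reports
// whether the caller requires zeroed memory.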
void*
runtime·mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
{
	int32 sizeclass, rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;
	uint32 *ref;

	if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
		runtime·gosched();
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;
	if(size == 0)
		size = 1;

	mstats.nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime·SizeToClass(size);
		size = runtime·class_to_size[sizeclass];
		c = m->mcache;
		v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime·throw("out of memory");
		mstats.alloc += size;
		mstats.total_alloc += size;
		mstats.by_size[sizeclass].nmalloc++;

		if(!runtime·mlookup(v, nil, nil, nil, &ref)) {
			runtime·printf("malloc %D; runtime·mlookup failed\n", (uint64)size);
			runtime·throw("malloc runtime·mlookup");
		}
		*ref = RefNone | refflag;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1);
		if(s == nil)
			runtime·throw("out of memory");
		size = npages<<PageShift;
		mstats.alloc += size;
		mstats.total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		s->gcref0 = RefNone | refflag;
		ref = &s->gcref0;
	}
	m->mallocing = 0;
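
	// Memory profiling: next_sample counts down bytes allocated and is
	// re-drawn uniformly from [0, 2*rate), so allocations are sampled on
	// average once per rate bytes; blocks of at least rate bytes are
	// always recorded.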
	if(!(refflag & RefNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size >= rate)
			goto profile;
		if(m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = fastrand1() % (2*rate);
		profile:
			*ref |= RefProfiled;
			runtime·MProf_Malloc(v, size);
		}
	}

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);
	return v;
}
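
// Allocate size bytes: zeroed and subject to profiling, but never
// triggers a collection itself.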
void*
runtime·malloc(uintptr size)
{
	return runtime·mallocgc(size, 0, 0, 1);
}
// Free the object whose base pointer is v.
void
runtime·free(void *v)
{
	int32 sizeclass, size;
	MSpan *s;
	MCache *c;
	uint32 prof, *ref;

	if(v == nil)
		return;

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime·mlookup(v, nil, nil, &s, &ref)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = *ref & RefProfiled;
	*ref = RefFree;
	// Find size class for v.
	sizeclass = s->sizeclass;
	if(sizeclass == 0) {
		// Large object.
		if(prof)
			runtime·MProf_Free(v, s->npages<<PageShift);
		mstats.alloc -= s->npages<<PageShift;
		runtime·memclr(v, s->npages<<PageShift);
		runtime·MHeap_Free(&runtime·mheap, s, 1);
	} else {
		// Small object.
		c = m->mcache;
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
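		// (Word 0 of a freed block becomes the free-list link, so word 1
		// carries the needs-zeroing flag checked on the next allocation.)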
		if(prof)
			runtime·MProf_Free(v, size);
		mstats.alloc -= size;
		mstats.by_size[sizeclass].nfree++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	m->mallocing = 0;
}
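
// Look up the block containing v, reporting its base address, size,
// owning span, and gc reference word through whichever of base, size,
// sp, and ref are non-nil. Returns 0 if v is not in an allocated block.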
int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
{
	uintptr n, nobj, i;
	byte *p;
	MSpan *s;

	mstats.nlookup++;
	s = runtime·MHeap_LookupMaybe(&runtime·mheap, (uintptr)v>>PageShift);
	if(sp)
		*sp = s;
	if(s == nil) {
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		if(ref)
			*ref = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		if(ref)
			*ref = &s->gcref0;
		return 1;
	}
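
	// Small object: the span packs blocks of a single size class,
	// followed by the array s->gcref holding one reference word per block.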
	if((byte*)v >= (byte*)s->gcref) {
		// pointers into the gc ref counts
		// do not count as pointers.
		return 0;
	}

	n = runtime·class_to_size[s->sizeclass];
	i = ((byte*)v - p)/n;
	if(base)
		*base = p + i*n;
	if(size)
		*size = n;

	// good for error checking, but expensive
	if(0) {
		nobj = (s->npages << PageShift) / (n + RefcountOverhead);
		if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
			runtime·printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
				s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
			runtime·printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
				s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
				(uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
			runtime·throw("bad gcref");
		}
	}
	if(ref)
		*ref = &s->gcref[i];
	return 1;
}
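
// Allocate a per-m MCache from the heap's fixed-size cache allocator.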
MCache*
runtime·allocmcache(void)
{
	MCache *c;

	runtime·lock(&runtime·mheap);
	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime·mheap.cachealloc.sys;
	runtime·unlock(&runtime·mheap);
	return c;
}
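
// Initialize the allocator: size-class tables, the central heap, and
// the current m's cache.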
void
runtime·mallocinit(void)
{
	runtime·SysMemInit();
	runtime·InitSizes();
	runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(1));
}
// Runtime stubs.
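
// mal allocates n zeroed bytes and, unlike runtime·malloc, may trigger
// a garbage collection (it passes dogc=1 to runtime·mallocgc).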
void*
runtime·mal(uintptr n)
{
	return runtime·mallocgc(n, 0, 1, 1);
}

func new(n uint32) (ret *uint8) {
	ret = runtime·mal(n);
}
// Stack allocator uses malloc/free most of the time,
// but if we're in the middle of malloc and need stack,
// we have to do something else to avoid deadlock.
// In that case, we fall back on a fixed-size free-list
// allocator, assuming that inside malloc all the stack
// frames are small, so that all the stack allocations
// will be a single size, the minimum (right now, 5k).
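//
// Lock and FixAlloc are embedded as unnamed fields (a Plan 9 C
// extension), so &stacks can be handed to runtime·lock and the
// FixAlloc routines directly.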
static struct {
	Lock;
	FixAlloc;
} stacks;
void*
runtime·stackalloc(uint32 n)
{
	void *v;
	uint32 *ref;

	if(m->mallocing || m->gcing) {
		runtime·lock(&stacks);
		if(stacks.size == 0)
			runtime·FixAlloc_Init(&stacks, n, runtime·SysAlloc, nil, nil);
		if(stacks.size != n) {
			runtime·printf("stackalloc: in malloc, size=%D want %d\n", (uint64)stacks.size, n);
			runtime·throw("stackalloc");
		}
		v = runtime·FixAlloc_Alloc(&stacks);
		mstats.stacks_inuse = stacks.inuse;
		mstats.stacks_sys = stacks.sys;
		runtime·unlock(&stacks);
		return v;
	}

	v = runtime·mallocgc(n, RefNoProfiling, 0, 0);
	if(!runtime·mlookup(v, nil, nil, nil, &ref))
		runtime·throw("stackalloc runtime·mlookup");
	*ref = RefStack;
	return v;
}
void
runtime·stackfree(void *v)
{
	if(m->mallocing || m->gcing) {
		runtime·lock(&stacks);
		runtime·FixAlloc_Free(&stacks, v);
		mstats.stacks_inuse = stacks.inuse;
		mstats.stacks_sys = stacks.sys;
		runtime·unlock(&stacks);
		return;
	}
	runtime·free(v);
}
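
// Go-callable wrappers exported by package runtime.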
func Alloc(n uintptr) (p *byte) {
	p = runtime·malloc(n);
}

func Free(p *byte) {
	runtime·free(p);
}

func Lookup(p *byte) (base *byte, size uintptr) {
	runtime·mlookup(p, &base, &size, nil, nil);
}

func GC() {
	runtime·gc(1);
}
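
// SetFinalizer arranges for finalizer to run once obj is no longer
// reachable. obj must point to the beginning of an allocated block and
// finalizer must be nil or a func taking exactly obj's pointer type.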
func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	FuncType *ft;
	int32 i, nret;
	Type *t;

	if(obj.type == nil) {
		runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
	throw:
		runtime·throw("runtime.SetFinalizer");
	}
	if(obj.type->kind != KindPtr) {
		runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime·mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
		runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	nret = 0;
	if(finalizer.type != nil) {
		if(finalizer.type->kind != KindFunc) {
		badfunc:
			runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
			goto throw;
		}
		ft = (FuncType*)finalizer.type;
		if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
			goto badfunc;

		// compute size needed for return parameters
		for(i=0; i<ft->out.len; i++) {
			t = ((Type**)ft->out.array)[i];
			nret = (nret + t->align - 1) & ~(t->align - 1);
			nret += t->size;
		}
		nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
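
		// nret is now the frame size needed for the finalizer's results,
		// with each result aligned and the total rounded to a pointer
		// boundary.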

		if(runtime·getfinalizer(obj.data, 0)) {
			runtime·printf("runtime.SetFinalizer: finalizer already set\n");
			goto throw;
		}
	}
	runtime·addfinalizer(obj.data, finalizer.data, nret);
}