// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "malloc.h"
#include "defs.h"
#include "type.h"
MHeap mheap;
MStats mstats;
extern volatile int32 ·MemProfileRate;
// Same algorithm from chan.c, but a different
// instance of the static uint32 x.
// Not protected by a lock - let the threads use
// the same random number if they like.
static uint32
fastrand1(void)
{
static uint32 x = 0x49f6428aUL;
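
	// Shift left one bit; if the high bit of the result is set,
	// xor in the feedback constant.  Effectively a linear
	// feedback shift register.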
x += x;
if(x & 0x80000000L)
x ^= 0x88888eefUL;
return x;
}

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
{
int32 sizeclass, rate;
MCache *c;
uintptr npages;
MSpan *s;
void *v;
uint32 *ref;
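
	// Give the garbage collector a chance to stop the world:
	// yield if it is waiting, unless we are the scheduler's g0
	// or are holding locks.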
if(gcwaiting && g != m->g0 && m->locks == 0)
gosched();
if(m->mallocing)
throw("malloc/free - deadlock");
m->mallocing = 1;
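
	// Round zero-byte requests up to one byte, so that even a
	// zero-sized object gets its own allocated block.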
if(size == 0)
size = 1;
mstats.nmalloc++;
if(size <= MaxSmallSize) {
// Allocate from mcache free lists.
sizeclass = SizeToClass(size);
size = class_to_size[sizeclass];
c = m->mcache;
v = MCache_Alloc(c, sizeclass, size, zeroed);
if(v == nil)
throw("out of memory");
mstats.alloc += size;
mstats.total_alloc += size;
mstats.by_size[sizeclass].nmalloc++;
if(!mlookup(v, nil, nil, nil, &ref)) {
printf("malloc %D; mlookup failed\n", (uint64)size);
throw("malloc mlookup");
}
*ref = RefNone | refflag;
} else {
// TODO(rsc): Report tracebacks for very large allocations.
// Allocate directly from heap.
npages = size >> PageShift;
if((size & PageMask) != 0)
npages++;
s = MHeap_Alloc(&mheap, npages, 0, 1);
if(s == nil)
throw("out of memory");
size = npages<<PageShift;
mstats.alloc += size;
mstats.total_alloc += size;
v = (void*)(s->start << PageShift);
// set up for mark-sweep
s->gcref0 = RefNone | refflag;
ref = &s->gcref0;
}
m->mallocing = 0;
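
	// Memory profiling: allocations at least as large as the sampling
	// rate are always recorded; smaller ones are sampled so that, on
	// average, one sample is taken per ·MemProfileRate bytes allocated.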
if(!(refflag & RefNoProfiling) && (rate = ·MemProfileRate) > 0) {
if(size >= rate)
goto profile;
if(m->mcache->next_sample > size)
m->mcache->next_sample -= size;
else {
// pick next profile time
if(rate > 0x3fffffff) // make 2*rate not overflow
rate = 0x3fffffff;
m->mcache->next_sample = fastrand1() % (2*rate);
profile:
*ref |= RefProfiled;
MProf_Malloc(v, size);
}
}
if(dogc && mstats.heap_alloc >= mstats.next_gc)
gc(0);
return v;
}

void*
malloc(uintptr size)
{
return mallocgc(size, 0, 0, 1);
}

// Free the object whose base pointer is v.
void
free(void *v)
{
int32 sizeclass, size;
MSpan *s;
MCache *c;
uint32 prof, *ref;
if(v == nil)
return;
if(m->mallocing)
throw("malloc/free - deadlock");
m->mallocing = 1;
if(!mlookup(v, nil, nil, &s, &ref)) {
printf("free %p: not an allocated block\n", v);
throw("free mlookup");
}
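
	// Record whether the profiler sampled this block,
	// then mark its reference word freed.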
prof = *ref & RefProfiled;
*ref = RefFree;
// Find size class for v.
sizeclass = s->sizeclass;
if(sizeclass == 0) {
// Large object.
if(prof)
MProf_Free(v, s->npages<<PageShift);
mstats.alloc -= s->npages<<PageShift;
runtime_memclr(v, s->npages<<PageShift);
MHeap_Free(&mheap, s, 1);
} else {
// Small object.
c = m->mcache;
size = class_to_size[sizeclass];
if(size > sizeof(uintptr))
((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
if(prof)
MProf_Free(v, size);
mstats.alloc -= size;
mstats.by_size[sizeclass].nfree++;
MCache_Free(c, v, sizeclass, size);
}
m->mallocing = 0;
}
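
// mlookup finds the allocated block containing the pointer v.
// For whichever of the out-parameters are non-nil, it sets *base to
// the block's base address, *size to its size, *sp to its span, and
// *ref to the address of its reference-count word.
// It returns 1 if v points into an allocated block, 0 otherwise.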
int32
mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
{
uintptr n, nobj, i;
byte *p;
MSpan *s;
mstats.nlookup++;
s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
if(sp)
*sp = s;
if(s == nil) {
if(base)
*base = nil;
if(size)
*size = 0;
if(ref)
*ref = 0;
return 0;
}
p = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass == 0) {
// Large object.
if(base)
*base = p;
if(size)
*size = s->npages<<PageShift;
if(ref)
*ref = &s->gcref0;
return 1;
}
if((byte*)v >= (byte*)s->gcref) {
// pointers into the gc ref counts
// do not count as pointers.
return 0;
}
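
	// Small object: compute its index in the span from its byte
	// offset, rounding v down to the object's base address.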
n = class_to_size[s->sizeclass];
i = ((byte*)v - p)/n;
if(base)
*base = p + i*n;
if(size)
*size = n;
// good for error checking, but expensive
if(0) {
nobj = (s->npages << PageShift) / (n + RefcountOverhead);
if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
(uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
throw("bad gcref");
}
}
if(ref)
*ref = &s->gcref[i];
return 1;
}
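
// allocmcache allocates a per-m MCache from the heap's
// fixed-size cache allocator, with the heap locked.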
MCache*
allocmcache(void)
{
MCache *c;
lock(&mheap);
c = FixAlloc_Alloc(&mheap.cachealloc);
mstats.mcache_inuse = mheap.cachealloc.inuse;
mstats.mcache_sys = mheap.cachealloc.sys;
unlock(&mheap);
return c;
}
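
// mallocinit initializes the allocator: the system memory arena,
// the size classes, and the heap, allocates the bootstrap m's cache,
// and verifies that a trivial malloc/free round trip works.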
void
mallocinit(void)
{
SysMemInit();
InitSizes();
MHeap_Init(&mheap, SysAlloc);
m->mcache = allocmcache();
// See if it works.
free(malloc(1));
}

// Runtime stubs.
void*
mal(uintptr n)
{
return mallocgc(n, 0, 1, 1);
}
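
// Go-callable version of mal; the func syntax below is translated
// into a C function that compiled Go code can call.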
func mal(n uint32) (ret *uint8) {
ret = mal(n);
}

// Stack allocator uses malloc/free most of the time,
// but if we're in the middle of malloc and need stack,
// we have to do something else to avoid deadlock.
// In that case, we fall back on a fixed-size free-list
// allocator, assuming that inside malloc all the stack
// frames are small, so that all the stack allocations
// will be a single size, the minimum (right now, 5k).
struct {
Lock;
FixAlloc;
} stacks;

void*
stackalloc(uint32 n)
{
void *v;
uint32 *ref;
if(m->mallocing || m->gcing) {
lock(&stacks);
if(stacks.size == 0)
FixAlloc_Init(&stacks, n, SysAlloc, nil, nil);
if(stacks.size != n) {
printf("stackalloc: in malloc, size=%D want %d", (uint64)stacks.size, n);
throw("stackalloc");
}
v = FixAlloc_Alloc(&stacks);
mstats.stacks_inuse = stacks.inuse;
mstats.stacks_sys = stacks.sys;
unlock(&stacks);
return v;
}
v = mallocgc(n, RefNoProfiling, 0, 0);
if(!mlookup(v, nil, nil, nil, &ref))
throw("stackalloc mlookup");
*ref = RefStack;
return v;
}

void
stackfree(void *v)
{
if(m->mallocing || m->gcing) {
lock(&stacks);
FixAlloc_Free(&stacks, v);
mstats.stacks_inuse = stacks.inuse;
mstats.stacks_sys = stacks.sys;
unlock(&stacks);
return;
}
free(v);
}
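
// Go-callable entry points wrapping the allocator, block lookup,
// and the garbage collector.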
func Alloc(n uintptr) (p *byte) {
p = malloc(n);
}

func Free(p *byte) {
free(p);
}

func Lookup(p *byte) (base *byte, size uintptr) {
mlookup(p, &base, &size, nil, nil);
}

func GC() {
gc(1);
}
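
// SetFinalizer installs finalizer, a func that takes obj's type as
// its single argument, to be run when obj (a pointer to the start of
// an allocated block) becomes unreachable.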
func SetFinalizer(obj Eface, finalizer Eface) {
byte *base;
uintptr size;
FuncType *ft;
int32 i, nret;
Type *t;
if(obj.type == nil) {
printf("runtime.SetFinalizer: first argument is nil interface\n");
throw:
throw("runtime.SetFinalizer");
}
if(obj.type->kind != KindPtr) {
printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
goto throw;
}
if(!mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
goto throw;
}
nret = 0;
if(finalizer.type != nil) {
if(finalizer.type->kind != KindFunc) {
badfunc:
printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
goto throw;
}
ft = (FuncType*)finalizer.type;
if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
goto badfunc;
// compute size needed for return parameters
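		// Each result is aligned to its own alignment, and the
		// total is rounded up to a multiple of the pointer size.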
for(i=0; i<ft->out.len; i++) {
t = ((Type**)ft->out.array)[i];
nret = (nret + t->align - 1) & ~(t->align - 1);
nret += t->size;
}
nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
if(getfinalizer(obj.data, 0)) {
printf("runtime.SetFinalizer: finalizer already set");
goto throw;
}
}
addfinalizer(obj.data, finalizer.data, nret);
}