Diffstat (limited to 'gcc-4.8.1/libgo/runtime/malloc.goc')
-rw-r--r-- | gcc-4.8.1/libgo/runtime/malloc.goc | 784 |
1 file changed, 0 insertions, 784 deletions
diff --git a/gcc-4.8.1/libgo/runtime/malloc.goc b/gcc-4.8.1/libgo/runtime/malloc.goc
deleted file mode 100644
index a48464207..000000000
--- a/gcc-4.8.1/libgo/runtime/malloc.goc
+++ /dev/null
@@ -1,784 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// See malloc.h for overview.
-//
-// TODO(rsc): double-check stats.
-
-package runtime
-#include <stddef.h>
-#include <errno.h>
-#include <stdlib.h>
-#include "go-alloc.h"
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "interface.h"
-#include "go-type.h"
-#include "race.h"
-
-MHeap runtime_mheap;
-
-int32 runtime_checking;
-
-extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go
-
-extern volatile intgo runtime_MemProfileRate
-  __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
-
-// Allocate an object of at least size bytes.
-// Small objects are allocated from the per-thread cache's free lists.
-// Large objects (> 32 kB) are allocated straight from the heap.
-void*
-runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
-{
-	M *m;
-	G *g;
-	int32 sizeclass;
-	intgo rate;
-	MCache *c;
-	uintptr npages;
-	MSpan *s;
-	void *v;
-
-	m = runtime_m();
-	g = runtime_g();
-	if(g->status == Gsyscall)
-		dogc = 0;
-	if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) {
-		runtime_gosched();
-		m = runtime_m();
-	}
-	if(m->mallocing)
-		runtime_throw("malloc/free - deadlock");
-	m->mallocing = 1;
-	if(size == 0)
-		size = 1;
-
-	if(DebugTypeAtBlockEnd)
-		size += sizeof(uintptr);
-
-	c = m->mcache;
-	c->local_nmalloc++;
-	if(size <= MaxSmallSize) {
-		// Allocate from mcache free lists.
-		sizeclass = runtime_SizeToClass(size);
-		size = runtime_class_to_size[sizeclass];
-		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
-		if(v == nil)
-			runtime_throw("out of memory");
-		c->local_alloc += size;
-		c->local_total_alloc += size;
-		c->local_by_size[sizeclass].nmalloc++;
-	} else {
-		// TODO(rsc): Report tracebacks for very large allocations.
-
-		// Allocate directly from heap.
-		npages = size >> PageShift;
-		if((size & PageMask) != 0)
-			npages++;
-		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, zeroed);
-		if(s == nil)
-			runtime_throw("out of memory");
-		size = npages<<PageShift;
-		c->local_alloc += size;
-		c->local_total_alloc += size;
-		v = (void*)(s->start << PageShift);
-
-		// setup for mark sweep
-		runtime_markspan(v, 0, 0, true);
-	}
-
-	if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
-		// purge cache stats to prevent overflow
-		runtime_lock(&runtime_mheap);
-		runtime_purgecachedstats(c);
-		runtime_unlock(&runtime_mheap);
-	}
-
-	if(!(flag & FlagNoGC))
-		runtime_markallocated(v, size, (flag&FlagNoPointers) != 0);
-
-	if(DebugTypeAtBlockEnd)
-		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;
-
-	m->mallocing = 0;
-
-	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
-		if(size >= (uint32) rate)
-			goto profile;
-		if((uint32) m->mcache->next_sample > size)
-			m->mcache->next_sample -= size;
-		else {
-			// pick next profile time
-			// If you change this, also change allocmcache.
-			if(rate > 0x3fffffff)	// make 2*rate not overflow
-				rate = 0x3fffffff;
-			m->mcache->next_sample = runtime_fastrand1() % (2*rate);
-		profile:
-			runtime_setblockspecial(v, true);
-			runtime_MProf_Malloc(v, size);
-		}
-	}
-
-	if(dogc && mstats.heap_alloc >= mstats.next_gc)
-		runtime_gc(0);
-
-	if(raceenabled) {
-		runtime_racemalloc(v, size, m->racepc);
-		m->racepc = nil;
-	}
-	return v;
-}
-
-void*
-__go_alloc(uintptr size)
-{
-	return runtime_mallocgc(size, 0, 0, 1);
-}
-
-// Free the object whose base pointer is v.
-void
-__go_free(void *v)
-{
-	M *m;
-	int32 sizeclass;
-	MSpan *s;
-	MCache *c;
-	uint32 prof;
-	uintptr size;
-
-	if(v == nil)
-		return;
-
-	// If you change this also change mgc0.c:/^sweep,
-	// which has a copy of the guts of free.
-
-	m = runtime_m();
-	if(m->mallocing)
-		runtime_throw("malloc/free - deadlock");
-	m->mallocing = 1;
-
-	if(!runtime_mlookup(v, nil, nil, &s)) {
-		runtime_printf("free %p: not an allocated block\n", v);
-		runtime_throw("free runtime_mlookup");
-	}
-	prof = runtime_blockspecial(v);
-
-	if(raceenabled)
-		runtime_racefree(v);
-
-	// Find size class for v.
-	sizeclass = s->sizeclass;
-	c = m->mcache;
-	if(sizeclass == 0) {
-		// Large object.
-		size = s->npages<<PageShift;
-		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
-		// Must mark v freed before calling unmarkspan and MHeap_Free:
-		// they might coalesce v into other spans and change the bitmap further.
-		runtime_markfreed(v, size);
-		runtime_unmarkspan(v, 1<<PageShift);
-		runtime_MHeap_Free(&runtime_mheap, s, 1);
-	} else {
-		// Small object.
-		size = runtime_class_to_size[sizeclass];
-		if(size > sizeof(uintptr))
-			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
-		// Must mark v freed before calling MCache_Free:
-		// it might coalesce v and other blocks into a bigger span
-		// and change the bitmap further.
-		runtime_markfreed(v, size);
-		c->local_by_size[sizeclass].nfree++;
-		runtime_MCache_Free(c, v, sizeclass, size);
-	}
-	c->local_nfree++;
-	c->local_alloc -= size;
-	if(prof)
-		runtime_MProf_Free(v, size);
-	m->mallocing = 0;
-}
-
-int32
-runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
-{
-	M *m;
-	uintptr n, i;
-	byte *p;
-	MSpan *s;
-
-	m = runtime_m();
-
-	m->mcache->local_nlookup++;
-	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
-		// purge cache stats to prevent overflow
-		runtime_lock(&runtime_mheap);
-		runtime_purgecachedstats(m->mcache);
-		runtime_unlock(&runtime_mheap);
-	}
-
-	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
-	if(sp)
-		*sp = s;
-	if(s == nil) {
-		runtime_checkfreed(v, 1);
-		if(base)
-			*base = nil;
-		if(size)
-			*size = 0;
-		return 0;
-	}
-
-	p = (byte*)((uintptr)s->start<<PageShift);
-	if(s->sizeclass == 0) {
-		// Large object.
-		if(base)
-			*base = p;
-		if(size)
-			*size = s->npages<<PageShift;
-		return 1;
-	}
-
-	if((byte*)v >= (byte*)s->limit) {
-		// pointers past the last block do not count as pointers.
-		return 0;
-	}
-
-	n = s->elemsize;
-	if(base) {
-		i = ((byte*)v - p)/n;
-		*base = p + i*n;
-	}
-	if(size)
-		*size = n;
-
-	return 1;
-}
-
-MCache*
-runtime_allocmcache(void)
-{
-	intgo rate;
-	MCache *c;
-
-	runtime_lock(&runtime_mheap);
-	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
-	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
-	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
-	runtime_unlock(&runtime_mheap);
-	runtime_memclr((byte*)c, sizeof(*c));
-
-	// Set first allocation sample size.
-	rate = runtime_MemProfileRate;
-	if(rate > 0x3fffffff)	// make 2*rate not overflow
-		rate = 0x3fffffff;
-	if(rate != 0)
-		c->next_sample = runtime_fastrand1() % (2*rate);
-
-	return c;
-}
-
-void
-runtime_freemcache(MCache *c)
-{
-	runtime_MCache_ReleaseAll(c);
-	runtime_lock(&runtime_mheap);
-	runtime_purgecachedstats(c);
-	runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
-	runtime_unlock(&runtime_mheap);
-}
-
-void
-runtime_purgecachedstats(MCache *c)
-{
-	// Protected by either heap or GC lock.
-	mstats.heap_alloc += c->local_cachealloc;
-	c->local_cachealloc = 0;
-	mstats.heap_objects += c->local_objects;
-	c->local_objects = 0;
-	mstats.nmalloc += c->local_nmalloc;
-	c->local_nmalloc = 0;
-	mstats.nfree += c->local_nfree;
-	c->local_nfree = 0;
-	mstats.nlookup += c->local_nlookup;
-	c->local_nlookup = 0;
-	mstats.alloc += c->local_alloc;
-	c->local_alloc = 0;
-	mstats.total_alloc += c->local_total_alloc;
-	c->local_total_alloc = 0;
-}
-
-extern uintptr runtime_sizeof_C_MStats
-  __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
-
-#define MaxArena32 (2U<<30)
-
-void
-runtime_mallocinit(void)
-{
-	byte *p;
-	uintptr arena_size, bitmap_size;
-	extern byte end[];
-	byte *want;
-	uintptr limit;
-
-	runtime_sizeof_C_MStats = sizeof(MStats);
-
-	p = nil;
-	arena_size = 0;
-	bitmap_size = 0;
-
-	// for 64-bit build
-	USED(p);
-	USED(arena_size);
-	USED(bitmap_size);
-
-	runtime_InitSizes();
-
-	limit = runtime_memlimit();
-
-	// Set up the allocation arena, a contiguous area of memory where
-	// allocated data will be found. The arena begins with a bitmap large
-	// enough to hold 4 bits per allocated word.
-	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
-		// On a 64-bit machine, allocate from a single contiguous reservation.
-		// 128 GB (MaxMem) should be big enough for now.
-		//
-		// The code will work with the reservation at any address, but ask
-		// SysReserve to use 0x000000c000000000 if possible.
-		// Allocating a 128 GB region takes away 37 bits, and the amd64
-		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
-		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
-		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
-		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
-		// UTF-8 sequences, and they are otherwise as far away from
-		// ff (likely a common byte) as possible. An earlier attempt to use 0x11f8
-		// caused out of memory errors on OS X during thread allocations.
-		// These choices are both for debuggability and to reduce the
-		// odds of the conservative garbage collector not collecting memory
-		// because some non-pointer block of memory had a bit pattern
-		// that matched a memory address.
-		//
-		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
-		// but it hardly matters: e0 00 is not valid UTF-8 either.
-		//
-		// If this fails we fall back to the 32 bit memory mechanism
-		arena_size = MaxMem;
-		bitmap_size = arena_size / (sizeof(void*)*8/4);
-		p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
-	}
-	if (p == nil) {
-		// On a 32-bit machine, we can't typically get away
-		// with a giant virtual address space reservation.
-		// Instead we map the memory information bitmap
-		// immediately after the data segment, large enough
-		// to handle another 2GB of mappings (256 MB),
-		// along with a reservation for another 512 MB of memory.
-		// When that gets used up, we'll start asking the kernel
-		// for any memory anywhere and hope it's in the 2GB
-		// following the bitmap (presumably the executable begins
-		// near the bottom of memory, so we'll have to use up
-		// most of memory before the kernel resorts to giving out
-		// memory before the beginning of the text segment).
-		//
-		// Alternatively we could reserve 512 MB bitmap, enough
-		// for 4GB of mappings, and then accept any memory the
-		// kernel threw at us, but normally that's a waste of 512 MB
-		// of address space, which is probably too much in a 32-bit world.
-		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
-		arena_size = 512<<20;
-		if(limit > 0 && arena_size+bitmap_size > limit) {
-			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
-			arena_size = bitmap_size * 8;
-		}
-
-		// SysReserve treats the address we ask for, end, as a hint,
-		// not as an absolute requirement. If we ask for the end
-		// of the data segment but the operating system requires
-		// a little more space before we can start allocating, it will
-		// give out a slightly higher pointer. Except QEMU, which
-		// is buggy, as usual: it won't adjust the pointer upward.
-		// So adjust it upward a little bit ourselves: 1/4 MB to get
-		// away from the running binary image and then round up
-		// to a MB boundary.
-		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
-		if(0xffffffff - (uintptr)want <= bitmap_size + arena_size)
-			want = 0;
-		p = runtime_SysReserve(want, bitmap_size + arena_size);
-		if(p == nil)
-			runtime_throw("runtime: cannot reserve arena virtual address space");
-		if((uintptr)p & (((uintptr)1<<PageShift)-1))
-			runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
-	}
-	if((uintptr)p & (((uintptr)1<<PageShift)-1))
-		runtime_throw("runtime: SysReserve returned unaligned address");
-
-	runtime_mheap.bitmap = p;
-	runtime_mheap.arena_start = p + bitmap_size;
-	runtime_mheap.arena_used = runtime_mheap.arena_start;
-	runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;
-
-	// Initialize the rest of the allocator.
-	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
-	runtime_m()->mcache = runtime_allocmcache();
-
-	// See if it works.
-	runtime_free(runtime_malloc(1));
-}
-
-void*
-runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
-{
-	byte *p;
-
-
-	if(n > (uintptr)(h->arena_end - h->arena_used)) {
-		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
-		// Reserve some more space.
-		byte *new_end;
-		uintptr needed;
-
-		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
-		// Round wanted arena size to a multiple of 256MB.
-		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
-		new_end = h->arena_end + needed;
-		if(new_end <= h->arena_start + MaxArena32) {
-			p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
-			if(p == h->arena_end)
-				h->arena_end = new_end;
-		}
-	}
-	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
-		// Keep taking from our reservation.
-		p = h->arena_used;
-		runtime_SysMap(p, n);
-		h->arena_used += n;
-		runtime_MHeap_MapBits(h);
-		if(raceenabled)
-			runtime_racemapshadow(p, n);
-		return p;
-	}
-
-	// If using 64-bit, our reservation is all we have.
-	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
-		return nil;
-
-	// On 32-bit, once the reservation is gone we can
-	// try to get memory at a location chosen by the OS
-	// and hope that it is in the range we allocated bitmap for.
-	p = runtime_SysAlloc(n);
-	if(p == nil)
-		return nil;
-
-	if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
-		runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
-			p, h->arena_start, h->arena_start+MaxArena32);
-		runtime_SysFree(p, n);
-		return nil;
-	}
-
-	if(p+n > h->arena_used) {
-		h->arena_used = p+n;
-		if(h->arena_used > h->arena_end)
-			h->arena_end = h->arena_used;
-		runtime_MHeap_MapBits(h);
-		if(raceenabled)
-			runtime_racemapshadow(p, n);
-	}
-
-	return p;
-}
-
-static Lock settype_lock;
-
-void
-runtime_settype_flush(M *mp, bool sysalloc)
-{
-	uintptr *buf, *endbuf;
-	uintptr size, ofs, j, t;
-	uintptr ntypes, nbytes2, nbytes3;
-	uintptr *data2;
-	byte *data3;
-	bool sysalloc3;
-	void *v;
-	uintptr typ, p;
-	MSpan *s;
-
-	buf = mp->settype_buf;
-	endbuf = buf + mp->settype_bufsize;
-
-	runtime_lock(&settype_lock);
-	while(buf < endbuf) {
-		v = (void*)*buf;
-		*buf = 0;
-		buf++;
-		typ = *buf;
-		buf++;
-
-		// (Manually inlined copy of runtime_MHeap_Lookup)
-		p = (uintptr)v>>PageShift;
-		if(sizeof(void*) == 8)
-			p -= (uintptr)runtime_mheap.arena_start >> PageShift;
-		s = runtime_mheap.map[p];
-
-		if(s->sizeclass == 0) {
-			s->types.compression = MTypes_Single;
-			s->types.data = typ;
-			continue;
-		}
-
-		size = s->elemsize;
-		ofs = ((uintptr)v - (s->start<<PageShift)) / size;
-
-		switch(s->types.compression) {
-		case MTypes_Empty:
-			ntypes = (s->npages << PageShift) / size;
-			nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
-
-			if(!sysalloc) {
-				data3 = runtime_mallocgc(nbytes3, FlagNoPointers, 0, 1);
-			} else {
-				data3 = runtime_SysAlloc(nbytes3);
-				if(0) runtime_printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
-			}
-
-			s->types.compression = MTypes_Bytes;
-			s->types.sysalloc = sysalloc;
-			s->types.data = (uintptr)data3;
-
-			((uintptr*)data3)[1] = typ;
-			data3[8*sizeof(uintptr) + ofs] = 1;
-			break;
-
-		case MTypes_Words:
-			((uintptr*)s->types.data)[ofs] = typ;
-			break;
-
-		case MTypes_Bytes:
-			data3 = (byte*)s->types.data;
-			for(j=1; j<8; j++) {
-				if(((uintptr*)data3)[j] == typ) {
-					break;
-				}
-				if(((uintptr*)data3)[j] == 0) {
-					((uintptr*)data3)[j] = typ;
-					break;
-				}
-			}
-			if(j < 8) {
-				data3[8*sizeof(uintptr) + ofs] = j;
-			} else {
-				ntypes = (s->npages << PageShift) / size;
-				nbytes2 = ntypes * sizeof(uintptr);
-
-				if(!sysalloc) {
-					data2 = runtime_mallocgc(nbytes2, FlagNoPointers, 0, 1);
-				} else {
-					data2 = runtime_SysAlloc(nbytes2);
-					if(0) runtime_printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
-				}
-
-				sysalloc3 = s->types.sysalloc;
-
-				s->types.compression = MTypes_Words;
-				s->types.sysalloc = sysalloc;
-				s->types.data = (uintptr)data2;
-
-				// Move the contents of data3 to data2. Then deallocate data3.
-				for(j=0; j<ntypes; j++) {
-					t = data3[8*sizeof(uintptr) + j];
-					t = ((uintptr*)data3)[t];
-					data2[j] = t;
-				}
-				if(sysalloc3) {
-					nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
-					if(0) runtime_printf("settype.(3->2): SysFree(%p,%x)\n", data3, (uint32)nbytes3);
-					runtime_SysFree(data3, nbytes3);
-				}
-
-				data2[ofs] = typ;
-			}
-			break;
-		}
-	}
-	runtime_unlock(&settype_lock);
-
-	mp->settype_bufsize = 0;
-}
-
-// It is forbidden to use this function if it is possible that
-// explicit deallocation via calling runtime_free(v) may happen.
-void
-runtime_settype(void *v, uintptr t)
-{
-	M *mp;
-	uintptr *buf;
-	uintptr i;
-	MSpan *s;
-
-	if(t == 0)
-		runtime_throw("settype: zero type");
-
-	mp = runtime_m();
-	buf = mp->settype_buf;
-	i = mp->settype_bufsize;
-	buf[i+0] = (uintptr)v;
-	buf[i+1] = t;
-	i += 2;
-	mp->settype_bufsize = i;
-
-	if(i == nelem(mp->settype_buf)) {
-		runtime_settype_flush(mp, false);
-	}
-
-	if(DebugTypeAtBlockEnd) {
-		s = runtime_MHeap_Lookup(&runtime_mheap, v);
-		*(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
-	}
-}
-
-void
-runtime_settype_sysfree(MSpan *s)
-{
-	uintptr ntypes, nbytes;
-
-	if(!s->types.sysalloc)
-		return;
-
-	nbytes = (uintptr)-1;
-
-	switch (s->types.compression) {
-	case MTypes_Words:
-		ntypes = (s->npages << PageShift) / s->elemsize;
-		nbytes = ntypes * sizeof(uintptr);
-		break;
-	case MTypes_Bytes:
-		ntypes = (s->npages << PageShift) / s->elemsize;
-		nbytes = 8*sizeof(uintptr) + 1*ntypes;
-		break;
-	}
-
-	if(nbytes != (uintptr)-1) {
-		if(0) runtime_printf("settype: SysFree(%p,%x)\n", (void*)s->types.data, (uint32)nbytes);
-		runtime_SysFree((void*)s->types.data, nbytes);
-	}
-}
-
-uintptr
-runtime_gettype(void *v)
-{
-	MSpan *s;
-	uintptr t, ofs;
-	byte *data;
-
-	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
-	if(s != nil) {
-		t = 0;
-		switch(s->types.compression) {
-		case MTypes_Empty:
-			break;
-		case MTypes_Single:
-			t = s->types.data;
-			break;
-		case MTypes_Words:
-			ofs = (uintptr)v - (s->start<<PageShift);
-			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
-			break;
-		case MTypes_Bytes:
-			ofs = (uintptr)v - (s->start<<PageShift);
-			data = (byte*)s->types.data;
-			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
-			t = ((uintptr*)data)[t];
-			break;
-		default:
-			runtime_throw("runtime_gettype: invalid compression kind");
-		}
-		if(0) {
-			runtime_lock(&settype_lock);
-			runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
-			runtime_unlock(&settype_lock);
-		}
-		return t;
-	}
-	return 0;
-}
-
-// Runtime stubs.
-
-void*
-runtime_mal(uintptr n)
-{
-	return runtime_mallocgc(n, 0, 1, 1);
-}
-
-void *
-runtime_new(const Type *typ)
-{
-	void *ret;
-	uint32 flag;
-
-	if(raceenabled)
-		runtime_m()->racepc = runtime_getcallerpc(&typ);
-
-	if(typ->__size == 0) {
-		// All 0-length allocations use this pointer.
-		// The language does not require the allocations to
-		// have distinct values.
-		ret = (uint8*)&runtime_zerobase;
-	} else {
-		flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
-		ret = runtime_mallocgc(typ->__size, flag, 1, 1);
-
-		if(UseSpanType && !flag) {
-			if(false) {
-				runtime_printf("new %S: %p\n", *typ->__reflection, ret);
-			}
-			runtime_settype(ret, (uintptr)typ | TypeInfo_SingleObject);
-		}
-	}
-
-	return ret;
-}
-
-func GC() {
-	runtime_gc(1);
-}
-
-func SetFinalizer(obj Eface, finalizer Eface) {
-	byte *base;
-	uintptr size;
-	const FuncType *ft;
-
-	if(obj.__type_descriptor == nil) {
-		runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
-		goto throw;
-	}
-	if(obj.__type_descriptor->__code != GO_PTR) {
-		runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
-		goto throw;
-	}
-	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
-		runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
-		goto throw;
-	}
-	ft = nil;
-	if(finalizer.__type_descriptor != nil) {
-		if(finalizer.__type_descriptor->__code != GO_FUNC)
-			goto badfunc;
-		ft = (const FuncType*)finalizer.__type_descriptor;
-		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
-			goto badfunc;
-	}
-
-	if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft)) {
-		runtime_printf("runtime.SetFinalizer: finalizer already set\n");
-		goto throw;
-	}
-	return;
-
-badfunc:
-	runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.__type_descriptor->__reflection, *obj.__type_descriptor->__reflection);
-throw:
-	runtime_throw("runtime.SetFinalizer");
-}
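
A few standalone sketches of the arithmetic in the deleted file follow; they are illustrations under stated assumptions, not part of the original sources. First, the large-object path of runtime_mallocgc rounds a request up to whole heap pages before calling runtime_MHeap_Alloc. A minimal sketch of that rounding, assuming 4 KB heap pages (PageShift = 12); the constants are not quoted from malloc.h.

/* Sketch of the large-object page rounding in runtime_mallocgc.
   PageShift/PageMask are assumed values (4 KB pages), for illustration only. */
#include <stdio.h>
#include <stdint.h>

enum { PageShift = 12 };                      /* assumed: 4 KB heap pages */
#define PageMask ((1u << PageShift) - 1)

static uintptr_t pages_for(uintptr_t size)
{
	uintptr_t npages = size >> PageShift;     /* whole pages */
	if((size & PageMask) != 0)                /* partial page left over? */
		npages++;                             /* round up */
	return npages;
}

int main(void)
{
	uintptr_t sizes[] = { 32769, 65536, 65537, 1<<20 };
	for(size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
		printf("size %8lu -> %lu pages (%lu bytes charged)\n",
		       (unsigned long)sizes[i],
		       (unsigned long)pages_for(sizes[i]),
		       (unsigned long)(pages_for(sizes[i]) << PageShift));
	return 0;
}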
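
Both runtime_mallocgc and runtime_allocmcache pick the next heap-profile sample as runtime_fastrand1() % (2*rate), a byte countdown that averages roughly one sampled allocation per MemProfileRate bytes. A small sketch of that sampling; rand() stands in for runtime_fastrand1, and rate = 512 KB is only an assumed value for the default MemProfileRate.

/* Sketch of the byte-countdown heap-profile sampling from the deleted file.
   rand() and the 512 KB rate are stand-ins, not the runtime's own values. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	uint32_t rate = 512 * 1024;              /* average bytes between samples */
	uint32_t next_sample = rand() % (2 * rate);
	unsigned long sampled = 0, total = 0;

	for(int i = 0; i < 1000000; i++) {
		uint32_t size = 64;                  /* pretend every allocation is 64 bytes */
		total += size;
		if(size >= rate || next_sample <= size) {
			sampled++;                       /* this allocation would be profiled */
			next_sample = rand() % (2 * rate);
		} else {
			next_sample -= size;             /* keep counting down */
		}
	}
	printf("sampled %lu of 1000000 allocations (~1 per %lu bytes)\n",
	       sampled, sampled ? total / sampled : 0);
	return 0;
}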
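
On 32-bit targets, runtime_mallocinit sizes the GC bitmap at 4 bits per word, MaxArena32 / (sizeof(void*)*8/4), i.e. one bitmap byte per 8 arena bytes, which is why the 2 GB arena needs the 256 MB bitmap mentioned in the comment; the limit-constrained path then splits an OS memory limit roughly 1:8 between bitmap and arena. A worked version of that arithmetic, with PageShift = 12 assumed.

/* Worked version of the 32-bit bitmap/arena sizing in runtime_mallocinit.
   PageShift = 12 is an assumption; MaxArena32 mirrors the #define in the file. */
#include <stdio.h>
#include <stdint.h>

enum { PageShift = 12 };
#define MaxArena32 (2ULL << 30)               /* 2 GB of mappable arena */

int main(void)
{
	uint64_t ptrbytes = 4;                    /* sizeof(void*) on a 32-bit target */
	uint64_t bitmap_size = MaxArena32 / (ptrbytes * 8 / 4);   /* 4 bits per word */
	uint64_t arena_size = 512ULL << 20;       /* initial 512 MB reservation */
	printf("bitmap %llu MB, arena %llu MB\n",
	       (unsigned long long)(bitmap_size >> 20),
	       (unsigned long long)(arena_size >> 20));

	/* With an OS memory limit, split it roughly 1:8 between bitmap and arena. */
	uint64_t limit = 1ULL << 30;              /* pretend a 1 GB limit */
	bitmap_size = (limit / 9) & ~((1ULL << PageShift) - 1);
	arena_size = bitmap_size * 8;
	printf("limited: bitmap %llu MB, arena %llu MB\n",
	       (unsigned long long)(bitmap_size >> 20),
	       (unsigned long long)(arena_size >> 20));
	return 0;
}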
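
runtime_mlookup maps any interior pointer to the base of its block by dividing the offset into the span by the span's element size. The same idea in isolation; the span start and element size below are made-up example values.

/* Sketch of the interior-pointer-to-base rounding done by runtime_mlookup.
   The span base and element size are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t span_start = 0x100000;              /* hypothetical span base address */
	uintptr_t elemsize = 48;                      /* hypothetical size class */
	uintptr_t v = span_start + 3*elemsize + 17;   /* pointer into the 4th block */

	uintptr_t i = (v - span_start) / elemsize;    /* block index within the span */
	uintptr_t base = span_start + i*elemsize;     /* base address of that block */
	printf("v=%#lx -> block %lu, base %#lx\n",
	       (unsigned long)v, (unsigned long)i, (unsigned long)base);
	return 0;
}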
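
Finally, the MTypes_Bytes encoding in runtime_settype_flush keeps, per span, a small table of up to 7 distinct type words plus one index byte per element, upgrading to a full word per element (MTypes_Words) once an 8th distinct type appears. A toy model of that byte-encoded store and lookup; the element count and type value are invented.

/* Toy model of the MTypes_Bytes layout used by runtime_settype_flush:
   an 8-slot dictionary of type words, then one index byte per element.
   NTYPES and the sample type value are invented for illustration. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

enum { NTYPES = 16 };                             /* elements in the toy span */

struct span_types {
	uintptr_t dict[8];                            /* slots 1..7 hold distinct type words */
	unsigned char idx[NTYPES];                    /* one dictionary index per element */
};

int main(void)
{
	struct span_types st;
	memset(&st, 0, sizeof st);

	uintptr_t typ = 0xdeadbeef;                   /* invented type-descriptor value */
	size_t ofs = 5;                               /* element being typed */

	/* Find or insert typ in the dictionary, as the MTypes_Bytes case does. */
	size_t j;
	for(j = 1; j < 8; j++) {
		if(st.dict[j] == typ || st.dict[j] == 0) {
			st.dict[j] = typ;
			break;
		}
	}
	if(j < 8)
		st.idx[ofs] = (unsigned char)j;           /* else the runtime upgrades to MTypes_Words */

	/* Lookup, mirroring the MTypes_Bytes case of runtime_gettype. */
	unsigned char slot = st.idx[ofs];
	printf("element %zu -> dict slot %u -> type %#lx\n",
	       ofs, slot, (unsigned long)st.dict[slot]);
	return 0;
}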