Diffstat (limited to 'gcc-4.9/libgo/runtime/malloc.goc')
-rw-r--r--  gcc-4.9/libgo/runtime/malloc.goc  859
1 file changed, 859 insertions, 0 deletions
diff --git a/gcc-4.9/libgo/runtime/malloc.goc b/gcc-4.9/libgo/runtime/malloc.goc
new file mode 100644
index 000000000..7120457a5
--- /dev/null
+++ b/gcc-4.9/libgo/runtime/malloc.goc
@@ -0,0 +1,859 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See malloc.h for overview.
+//
+// TODO(rsc): double-check stats.
+
+package runtime
+#include <stddef.h>
+#include <errno.h>
+#include <stdlib.h>
+#include "go-alloc.h"
+#include "runtime.h"
+#include "arch.h"
+#include "malloc.h"
+#include "interface.h"
+#include "go-type.h"
+#include "race.h"
+
+// Map gccgo field names to gc field names.
+// Eface aka __go_empty_interface.
+#define type __type_descriptor
+// Type aka __go_type_descriptor
+#define kind __code
+#define string __reflection
+#define KindPtr GO_PTR
+#define KindNoPointers GO_NO_POINTERS
+
+// GCCGO SPECIFIC CHANGE
+//
+// There is a long comment in runtime_mallocinit about where to put the heap
+// on a 64-bit system. It makes assumptions that are not valid on linux/arm64
+// -- it assumes user space can choose the lower 47 bits of a pointer, but on
+// linux/arm64 we can only choose the lower 39 bits. This means the heap is
+// roughly a quarter of the available address space and we cannot choose a bit
+// pattern that all pointers will have -- luckily the GC is mostly precise
+// these days so this doesn't matter all that much. The kernel (as of 3.13)
+// will allocate address space starting either down from 0x7fffffffff or up
+// from 0x2000000000, so we put the heap roughly in the middle of these two
+// addresses to minimize the chance that a non-heap allocation will get in the
+// way of the heap.
+//
+// This all means that there isn't much point in trying 128 different
+// locations for the heap on such systems.
+#ifdef __aarch64__
+#define HeapBase(i) ((void*)(uintptr)(0x40ULL<<32))
+#define HeapBaseOptions 1
+#else
+#define HeapBase(i) ((void*)(uintptr)(i<<40|0x00c0ULL<<32))
+#define HeapBaseOptions 0x80
+#endif
+// END GCCGO SPECIFIC CHANGE
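+// With these definitions, HeapBase(0) is 0x000000c000000000 and HeapBase(1)
+// is 0x000001c000000000 -- the 0x0000XXc000000000 addresses (XX=00..7f)
+// described in runtime_mallocinit below -- while on arm64 the single
+// option is 0x0000004000000000.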
+
+// Mark mheap as 'no pointers'; it does not contain interesting pointers but occupies ~45K.
+MHeap runtime_mheap;
+
+int32 runtime_checking;
+
+extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
+
+extern volatile intgo runtime_MemProfileRate
+ __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
+
+// Allocate an object of at least size bytes.
+// Small objects are allocated from the per-thread cache's free lists.
+// Large objects (> 32 kB) are allocated straight from the heap.
+// If the block will be freed with runtime_free(), typ must be 0.
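+// For example, an 8-byte allocation is satisfied from the per-thread MCache
+// free lists below, while a 64 kB buffer bypasses the cache and is handed
+// whole pages directly by runtime_MHeap_Alloc.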
+void*
+runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
+{
+ M *m;
+ G *g;
+ int32 sizeclass;
+ intgo rate;
+ MCache *c;
+ MCacheList *l;
+ uintptr npages;
+ MSpan *s;
+ MLink *v;
+ bool incallback;
+
+ if(size == 0) {
+ // All 0-length allocations use this pointer.
+ // The language does not require the allocations to
+ // have distinct values.
+ return &runtime_zerobase;
+ }
+
+ m = runtime_m();
+ g = runtime_g();
+
+ incallback = false;
+ if(m->mcache == nil && g->ncgo > 0) {
+ // For gccgo this case can occur when a cgo or SWIG function
+ // has an interface return type and the function
+ // returns a non-pointer, so memory allocation occurs
+ // after syscall.Cgocall but before syscall.CgocallDone.
+ // We treat it as a callback.
+ runtime_exitsyscall();
+ m = runtime_m();
+ incallback = true;
+ flag |= FlagNoInvokeGC;
+ }
+
+ if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {
+ runtime_gosched();
+ m = runtime_m();
+ }
+ if(m->mallocing)
+ runtime_throw("malloc/free - deadlock");
+ // Disable preemption during settype_flush.
+ // We can not use m->mallocing for this, because settype_flush calls mallocgc.
+ m->locks++;
+ m->mallocing = 1;
+
+ if(DebugTypeAtBlockEnd)
+ size += sizeof(uintptr);
+
+ c = m->mcache;
+ if(size <= MaxSmallSize) {
+ // Allocate from mcache free lists.
+ // Inlined version of SizeToClass().
+ if(size <= 1024-8)
+ sizeclass = runtime_size_to_class8[(size+7)>>3];
+ else
+ sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
+ size = runtime_class_to_size[sizeclass];
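+		// e.g. a 24-byte request indexes runtime_size_to_class8[3] and is
+		// rounded up to the 32-byte class (with the default tables); requests
+		// above 1016 bytes use the coarser 128-byte-granular table.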
+ l = &c->list[sizeclass];
+ if(l->list == nil)
+ runtime_MCache_Refill(c, sizeclass);
+ v = l->list;
+ l->list = v->next;
+ l->nlist--;
+ if(!(flag & FlagNoZero)) {
+ v->next = nil;
+ // block is zeroed iff second word is zero ...
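+			// (__go_free below, and the sweep code in mgc0.c that copies it,
+			// stamp the second word of freed blocks with 0xfeedfeedfeedfeed,
+			// so a non-zero second word means stale data that must be cleared)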
+ if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
+ runtime_memclr((byte*)v, size);
+ }
+ c->local_cachealloc += size;
+ } else {
+ // TODO(rsc): Report tracebacks for very large allocations.
+
+ // Allocate directly from heap.
+ npages = size >> PageShift;
+ if((size & PageMask) != 0)
+ npages++;
+ s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
+ if(s == nil)
+ runtime_throw("out of memory");
+ s->limit = (byte*)(s->start<<PageShift) + size;
+ size = npages<<PageShift;
+ v = (void*)(s->start << PageShift);
+
+ // setup for mark sweep
+ runtime_markspan(v, 0, 0, true);
+ }
+
+ if(!(flag & FlagNoGC))
+ runtime_markallocated(v, size, (flag&FlagNoScan) != 0);
+
+ if(DebugTypeAtBlockEnd)
+ *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
+
+ // TODO: save type even if FlagNoScan? Potentially expensive but might help
+ // heap profiling/tracing.
+ if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
+ uintptr *buf, i;
+
+ buf = m->settype_buf;
+ i = m->settype_bufsize;
+ buf[i++] = (uintptr)v;
+ buf[i++] = typ;
+ m->settype_bufsize = i;
+ }
+
+ m->mallocing = 0;
+ if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
+ runtime_settype_flush(m);
+ m->locks--;
+
+ if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
+ if(size >= (uint32) rate)
+ goto profile;
+ if((uint32) m->mcache->next_sample > size)
+ m->mcache->next_sample -= size;
+ else {
+ // pick next profile time
+ // If you change this, also change allocmcache.
+ if(rate > 0x3fffffff) // make 2*rate not overflow
+ rate = 0x3fffffff;
+ m->mcache->next_sample = runtime_fastrand1() % (2*rate);
+ profile:
+ runtime_setblockspecial(v, true);
+ runtime_MProf_Malloc(v, size);
+ }
+ }
+
+ if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
+ runtime_gc(0);
+
+ if(raceenabled)
+ runtime_racemalloc(v, size);
+
+ if(incallback)
+ runtime_entersyscall();
+
+ return v;
+}
+
+void*
+__go_alloc(uintptr size)
+{
+ return runtime_mallocgc(size, 0, FlagNoInvokeGC);
+}
+
+// Free the object whose base pointer is v.
+void
+__go_free(void *v)
+{
+ M *m;
+ int32 sizeclass;
+ MSpan *s;
+ MCache *c;
+ uint32 prof;
+ uintptr size;
+
+ if(v == nil)
+ return;
+
+ // If you change this also change mgc0.c:/^sweep,
+ // which has a copy of the guts of free.
+
+ m = runtime_m();
+ if(m->mallocing)
+ runtime_throw("malloc/free - deadlock");
+ m->mallocing = 1;
+
+ if(!runtime_mlookup(v, nil, nil, &s)) {
+ runtime_printf("free %p: not an allocated block\n", v);
+ runtime_throw("free runtime_mlookup");
+ }
+ prof = runtime_blockspecial(v);
+
+ if(raceenabled)
+ runtime_racefree(v);
+
+ // Find size class for v.
+ sizeclass = s->sizeclass;
+ c = m->mcache;
+ if(sizeclass == 0) {
+ // Large object.
+ size = s->npages<<PageShift;
+ *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
+ // Must mark v freed before calling unmarkspan and MHeap_Free:
+ // they might coalesce v into other spans and change the bitmap further.
+ runtime_markfreed(v, size);
+ runtime_unmarkspan(v, 1<<PageShift);
+ runtime_MHeap_Free(&runtime_mheap, s, 1);
+ c->local_nlargefree++;
+ c->local_largefree += size;
+ } else {
+ // Small object.
+ size = runtime_class_to_size[sizeclass];
+ if(size > sizeof(uintptr))
+ ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
+ // Must mark v freed before calling MCache_Free:
+ // it might coalesce v and other blocks into a bigger span
+ // and change the bitmap further.
+ runtime_markfreed(v, size);
+ c->local_nsmallfree[sizeclass]++;
+ runtime_MCache_Free(c, v, sizeclass, size);
+ }
+ if(prof)
+ runtime_MProf_Free(v, size);
+ m->mallocing = 0;
+}
+
+int32
+runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
+{
+ M *m;
+ uintptr n, i;
+ byte *p;
+ MSpan *s;
+
+ m = runtime_m();
+
+ m->mcache->local_nlookup++;
+ if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
+ // purge cache stats to prevent overflow
+ runtime_lock(&runtime_mheap);
+ runtime_purgecachedstats(m->mcache);
+ runtime_unlock(&runtime_mheap);
+ }
+
+ s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
+ if(sp)
+ *sp = s;
+ if(s == nil) {
+ runtime_checkfreed(v, 1);
+ if(base)
+ *base = nil;
+ if(size)
+ *size = 0;
+ return 0;
+ }
+
+ p = (byte*)((uintptr)s->start<<PageShift);
+ if(s->sizeclass == 0) {
+ // Large object.
+ if(base)
+ *base = p;
+ if(size)
+ *size = s->npages<<PageShift;
+ return 1;
+ }
+
+ n = s->elemsize;
+ if(base) {
+ i = ((byte*)v - p)/n;
+ *base = p + i*n;
+ }
+ if(size)
+ *size = n;
+
+ return 1;
+}
+
+MCache*
+runtime_allocmcache(void)
+{
+ intgo rate;
+ MCache *c;
+
+ runtime_lock(&runtime_mheap);
+ c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
+ runtime_unlock(&runtime_mheap);
+ runtime_memclr((byte*)c, sizeof(*c));
+
+ // Set first allocation sample size.
+ rate = runtime_MemProfileRate;
+ if(rate > 0x3fffffff) // make 2*rate not overflow
+ rate = 0x3fffffff;
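+	// a uniform draw from [0, 2*rate) makes the expected number of bytes
+	// between samples equal to MemProfileRate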
+ if(rate != 0)
+ c->next_sample = runtime_fastrand1() % (2*rate);
+
+ return c;
+}
+
+void
+runtime_freemcache(MCache *c)
+{
+ runtime_MCache_ReleaseAll(c);
+ runtime_lock(&runtime_mheap);
+ runtime_purgecachedstats(c);
+ runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
+ runtime_unlock(&runtime_mheap);
+}
+
+void
+runtime_purgecachedstats(MCache *c)
+{
+ MHeap *h;
+ int32 i;
+
+ // Protected by either heap or GC lock.
+ h = &runtime_mheap;
+ mstats.heap_alloc += c->local_cachealloc;
+ c->local_cachealloc = 0;
+ mstats.nlookup += c->local_nlookup;
+ c->local_nlookup = 0;
+ h->largefree += c->local_largefree;
+ c->local_largefree = 0;
+ h->nlargefree += c->local_nlargefree;
+ c->local_nlargefree = 0;
+ for(i=0; i<(int32)nelem(c->local_nsmallfree); i++) {
+ h->nsmallfree[i] += c->local_nsmallfree[i];
+ c->local_nsmallfree[i] = 0;
+ }
+}
+
+extern uintptr runtime_sizeof_C_MStats
+ __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");
+
+#define MaxArena32 (2U<<30)
+
+void
+runtime_mallocinit(void)
+{
+ byte *p;
+ uintptr arena_size, bitmap_size, spans_size;
+ extern byte _end[];
+ byte *want;
+ uintptr limit;
+ uint64 i;
+
+ runtime_sizeof_C_MStats = sizeof(MStats);
+
+ p = nil;
+ arena_size = 0;
+ bitmap_size = 0;
+ spans_size = 0;
+
+ // for 64-bit build
+ USED(p);
+ USED(arena_size);
+ USED(bitmap_size);
+ USED(spans_size);
+
+ runtime_InitSizes();
+
+ // limit = runtime_memlimit();
+ // See https://code.google.com/p/go/issues/detail?id=5049
+ // TODO(rsc): Fix after 1.1.
+ limit = 0;
+
+ // Set up the allocation arena, a contiguous area of memory where
+ // allocated data will be found. The arena begins with a bitmap large
+ // enough to hold 4 bits per allocated word.
+ if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
+ // On a 64-bit machine, allocate from a single contiguous reservation.
+ // 128 GB (MaxMem) should be big enough for now.
+ //
+ // The code will work with the reservation at any address, but ask
+ // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
+ // Allocating a 128 GB region takes away 37 bits, and the amd64
+ // doesn't let us choose the top 17 bits, so that leaves the 11 bits
+ // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
+ // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
+ // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
+ // UTF-8 sequences, and they are otherwise as far away from
+ // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
+ // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
+ // on OS X during thread allocations. 0x00c0 causes conflicts with
+ // AddressSanitizer which reserves all memory up to 0x0100.
+ // These choices are both for debuggability and to reduce the
+ // odds of the conservative garbage collector not collecting memory
+ // because some non-pointer block of memory had a bit pattern
+ // that matched a memory address.
+ //
+ // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
+ // but it hardly matters: e0 00 is not valid UTF-8 either.
+ //
+		// If this fails, we fall back to the 32-bit memory mechanism.
+ arena_size = MaxMem;
+ bitmap_size = arena_size / (sizeof(void*)*8/4);
+ spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
+ spans_size = ROUND(spans_size, PageSize);
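+		// With the defaults this works out to a 128 GB arena, an 8 GB bitmap
+		// (arena_size/16: 4 bits per 8-byte word) and, with 4 kB pages, a
+		// 256 MB spans array (one MSpan pointer per page), all reserved
+		// back to back at one of the HeapBase addresses.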
+ for(i = 0; i < HeapBaseOptions; i++) {
+ p = runtime_SysReserve(HeapBase(i), bitmap_size + spans_size + arena_size);
+ if(p != nil)
+ break;
+ }
+ }
+ if (p == nil) {
+ // On a 32-bit machine, we can't typically get away
+ // with a giant virtual address space reservation.
+ // Instead we map the memory information bitmap
+ // immediately after the data segment, large enough
+ // to handle another 2GB of mappings (256 MB),
+ // along with a reservation for another 512 MB of memory.
+ // When that gets used up, we'll start asking the kernel
+ // for any memory anywhere and hope it's in the 2GB
+ // following the bitmap (presumably the executable begins
+ // near the bottom of memory, so we'll have to use up
+ // most of memory before the kernel resorts to giving out
+ // memory before the beginning of the text segment).
+ //
+ // Alternatively we could reserve 512 MB bitmap, enough
+ // for 4GB of mappings, and then accept any memory the
+ // kernel threw at us, but normally that's a waste of 512 MB
+ // of address space, which is probably too much in a 32-bit world.
+ bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
+ arena_size = 512<<20;
+ spans_size = MaxArena32 / PageSize * sizeof(runtime_mheap.spans[0]);
+ if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
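+			// The 32-bit bitmap is 1/8 of the arena (4 bits per 4-byte word),
+			// so giving the bitmap limit/9 and the arena eight times that
+			// keeps arena_size + bitmap_size within the limit.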
+ bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
+ arena_size = bitmap_size * 8;
+ spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
+ }
+ spans_size = ROUND(spans_size, PageSize);
+
+ // SysReserve treats the address we ask for, end, as a hint,
+ // not as an absolute requirement. If we ask for the end
+ // of the data segment but the operating system requires
+ // a little more space before we can start allocating, it will
+ // give out a slightly higher pointer. Except QEMU, which
+ // is buggy, as usual: it won't adjust the pointer upward.
+ // So adjust it upward a little bit ourselves: 1/4 MB to get
+ // away from the running binary image and then round up
+ // to a MB boundary.
+ want = (byte*)ROUND((uintptr)_end + (1<<18), 1<<20);
+ if(0xffffffff - (uintptr)want <= bitmap_size + spans_size + arena_size)
+ want = 0;
+ p = runtime_SysReserve(want, bitmap_size + spans_size + arena_size);
+ if(p == nil)
+ runtime_throw("runtime: cannot reserve arena virtual address space");
+ if((uintptr)p & (((uintptr)1<<PageShift)-1))
+ runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p,
+ bitmap_size+spans_size+arena_size);
+ }
+ if((uintptr)p & (((uintptr)1<<PageShift)-1))
+ runtime_throw("runtime: SysReserve returned unaligned address");
+
+ runtime_mheap.spans = (MSpan**)p;
+ runtime_mheap.bitmap = p + spans_size;
+ runtime_mheap.arena_start = p + spans_size + bitmap_size;
+ runtime_mheap.arena_used = runtime_mheap.arena_start;
+ runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;
+
+ // Initialize the rest of the allocator.
+ runtime_MHeap_Init(&runtime_mheap);
+ runtime_m()->mcache = runtime_allocmcache();
+
+ // See if it works.
+ runtime_free(runtime_malloc(1));
+}
+
+void*
+runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
+{
+ byte *p;
+
+ if(n > (uintptr)(h->arena_end - h->arena_used)) {
+ // We are in 32-bit mode, maybe we didn't use all possible address space yet.
+ // Reserve some more space.
+ byte *new_end;
+ uintptr needed;
+
+ needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
+ needed = ROUND(needed, 256<<20);
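+		// grow the reservation in 256 MB increments rather than by exactly n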
+ new_end = h->arena_end + needed;
+ if(new_end <= h->arena_start + MaxArena32) {
+ p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
+ if(p == h->arena_end)
+ h->arena_end = new_end;
+ }
+ }
+ if(n <= (uintptr)(h->arena_end - h->arena_used)) {
+ // Keep taking from our reservation.
+ p = h->arena_used;
+ runtime_SysMap(p, n, &mstats.heap_sys);
+ h->arena_used += n;
+ runtime_MHeap_MapBits(h);
+ runtime_MHeap_MapSpans(h);
+ if(raceenabled)
+ runtime_racemapshadow(p, n);
+ return p;
+ }
+
+ // If using 64-bit, our reservation is all we have.
+ if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
+ return nil;
+
+ // On 32-bit, once the reservation is gone we can
+ // try to get memory at a location chosen by the OS
+ // and hope that it is in the range we allocated bitmap for.
+ p = runtime_SysAlloc(n, &mstats.heap_sys);
+ if(p == nil)
+ return nil;
+
+ if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
+ runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
+ p, h->arena_start, h->arena_start+MaxArena32);
+ runtime_SysFree(p, n, &mstats.heap_sys);
+ return nil;
+ }
+
+ if(p+n > h->arena_used) {
+ h->arena_used = p+n;
+ if(h->arena_used > h->arena_end)
+ h->arena_end = h->arena_used;
+ runtime_MHeap_MapBits(h);
+ runtime_MHeap_MapSpans(h);
+ if(raceenabled)
+ runtime_racemapshadow(p, n);
+ }
+
+ return p;
+}
+
+static struct
+{
+ Lock;
+ byte* pos;
+ byte* end;
+} persistent;
+
+enum
+{
+ PersistentAllocChunk = 256<<10,
+ PersistentAllocMaxBlock = 64<<10, // VM reservation granularity is 64K on windows
+};
+
+// Wrapper around SysAlloc that can allocate small chunks.
+// There is no associated free operation.
+// Intended for things like function/type/debug-related persistent data.
+// If align is 0, uses default align (currently 8).
+void*
+runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
+{
+ byte *p;
+
+ if(align != 0) {
+ if(align&(align-1))
+			runtime_throw("persistentalloc: align is not a power of 2");
+ if(align > PageSize)
+ runtime_throw("persistentalloc: align is too large");
+ } else
+ align = 8;
+ if(size >= PersistentAllocMaxBlock)
+ return runtime_SysAlloc(size, stat);
+ runtime_lock(&persistent);
+ persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
+ if(persistent.pos + size > persistent.end) {
+ persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
+ if(persistent.pos == nil) {
+ runtime_unlock(&persistent);
+ runtime_throw("runtime: cannot allocate memory");
+ }
+ persistent.end = persistent.pos + PersistentAllocChunk;
+ }
+ p = persistent.pos;
+ persistent.pos += size;
+ runtime_unlock(&persistent);
+ if(stat != &mstats.other_sys) {
+ // reaccount the allocation against provided stat
+ runtime_xadd64(stat, size);
+ runtime_xadd64(&mstats.other_sys, -(uint64)size);
+ }
+ return p;
+}
+
+static Lock settype_lock;
+
+void
+runtime_settype_flush(M *mp)
+{
+ uintptr *buf, *endbuf;
+ uintptr size, ofs, j, t;
+ uintptr ntypes, nbytes2, nbytes3;
+ uintptr *data2;
+ byte *data3;
+ void *v;
+ uintptr typ, p;
+ MSpan *s;
+
+ buf = mp->settype_buf;
+ endbuf = buf + mp->settype_bufsize;
+
+ runtime_lock(&settype_lock);
+ while(buf < endbuf) {
+ v = (void*)*buf;
+ *buf = 0;
+ buf++;
+ typ = *buf;
+ buf++;
+
+ // (Manually inlined copy of runtime_MHeap_Lookup)
+ p = (uintptr)v>>PageShift;
+ p -= (uintptr)runtime_mheap.arena_start >> PageShift;
+ s = runtime_mheap.spans[p];
+
+ if(s->sizeclass == 0) {
+ s->types.compression = MTypes_Single;
+ s->types.data = typ;
+ continue;
+ }
+
+ size = s->elemsize;
+ ofs = ((uintptr)v - (s->start<<PageShift)) / size;
+
+ switch(s->types.compression) {
+ case MTypes_Empty:
+ ntypes = (s->npages << PageShift) / size;
+ nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
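+			// MTypes_Bytes layout: an 8-entry uintptr table of type
+			// descriptors (entry 0 stays 0, meaning "unknown") followed by
+			// one byte per object indexing into that table.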
+ data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+ s->types.compression = MTypes_Bytes;
+ s->types.data = (uintptr)data3;
+ ((uintptr*)data3)[1] = typ;
+ data3[8*sizeof(uintptr) + ofs] = 1;
+ break;
+
+ case MTypes_Words:
+ ((uintptr*)s->types.data)[ofs] = typ;
+ break;
+
+ case MTypes_Bytes:
+ data3 = (byte*)s->types.data;
+ for(j=1; j<8; j++) {
+ if(((uintptr*)data3)[j] == typ) {
+ break;
+ }
+ if(((uintptr*)data3)[j] == 0) {
+ ((uintptr*)data3)[j] = typ;
+ break;
+ }
+ }
+ if(j < 8) {
+ data3[8*sizeof(uintptr) + ofs] = j;
+ } else {
+ ntypes = (s->npages << PageShift) / size;
+ nbytes2 = ntypes * sizeof(uintptr);
+ data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+ s->types.compression = MTypes_Words;
+ s->types.data = (uintptr)data2;
+
+ // Move the contents of data3 to data2. Then deallocate data3.
+ for(j=0; j<ntypes; j++) {
+ t = data3[8*sizeof(uintptr) + j];
+ t = ((uintptr*)data3)[t];
+ data2[j] = t;
+ }
+ data2[ofs] = typ;
+ }
+ break;
+ }
+ }
+ runtime_unlock(&settype_lock);
+
+ mp->settype_bufsize = 0;
+}
+
+uintptr
+runtime_gettype(void *v)
+{
+ MSpan *s;
+ uintptr t, ofs;
+ byte *data;
+
+ s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
+ if(s != nil) {
+ t = 0;
+ switch(s->types.compression) {
+ case MTypes_Empty:
+ break;
+ case MTypes_Single:
+ t = s->types.data;
+ break;
+ case MTypes_Words:
+ ofs = (uintptr)v - (s->start<<PageShift);
+ t = ((uintptr*)s->types.data)[ofs/s->elemsize];
+ break;
+ case MTypes_Bytes:
+ ofs = (uintptr)v - (s->start<<PageShift);
+ data = (byte*)s->types.data;
+ t = data[8*sizeof(uintptr) + ofs/s->elemsize];
+ t = ((uintptr*)data)[t];
+ break;
+ default:
+ runtime_throw("runtime_gettype: invalid compression kind");
+ }
+ if(0) {
+ runtime_lock(&settype_lock);
+ runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
+ runtime_unlock(&settype_lock);
+ }
+ return t;
+ }
+ return 0;
+}
+
+// Runtime stubs.
+
+void*
+runtime_mal(uintptr n)
+{
+ return runtime_mallocgc(n, 0, 0);
+}
+
+void *
+runtime_new(const Type *typ)
+{
+ return runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
+}
+
+static void*
+cnew(const Type *typ, intgo n, int32 objtyp)
+{
+ if((objtyp&(PtrSize-1)) != objtyp)
+ runtime_throw("runtime: invalid objtyp");
+ if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
+ runtime_panicstring("runtime: allocation size out of range");
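+	// comparing n against MaxMem/__size rejects counts that would make the
+	// __size*n multiplication below overflow or exceed MaxMem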
+ return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
+}
+
+// same as runtime_new, but callable from C
+void*
+runtime_cnew(const Type *typ)
+{
+ return cnew(typ, 1, TypeInfo_SingleObject);
+}
+
+void*
+runtime_cnewarray(const Type *typ, intgo n)
+{
+ return cnew(typ, n, TypeInfo_Array);
+}
+
+func GC() {
+ runtime_gc(1);
+}
+
+func SetFinalizer(obj Eface, finalizer Eface) {
+ byte *base;
+ uintptr size;
+ const FuncType *ft;
+ const Type *fint;
+ const PtrType *ot;
+
+ if(obj.__type_descriptor == nil) {
+ runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
+ goto throw;
+ }
+ if(obj.__type_descriptor->__code != GO_PTR) {
+ runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
+ goto throw;
+ }
+ if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
+ runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+ goto throw;
+ }
+ ft = nil;
+ ot = (const PtrType*)obj.__type_descriptor;
+ fint = nil;
+ if(finalizer.__type_descriptor != nil) {
+ if(finalizer.__type_descriptor->__code != GO_FUNC)
+ goto badfunc;
+ ft = (const FuncType*)finalizer.__type_descriptor;
+ if(ft->__dotdotdot || ft->__in.__count != 1)
+ goto badfunc;
+ fint = *(Type**)ft->__in.__values;
+ if(__go_type_descriptors_equal(fint, obj.__type_descriptor)) {
+ // ok - same type
+ } else if(fint->__code == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) {
+ // ok - not same type, but both pointers,
+ // one or the other is unnamed, and same element type, so assignable.
+ } else if(fint->kind == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) {
+ // ok - satisfies empty interface
+ } else if(fint->kind == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) {
+ // ok - satisfies non-empty interface
+ } else
+ goto badfunc;
+ }
+
+ if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft, ot)) {
+ runtime_printf("runtime.SetFinalizer: finalizer already set\n");
+ goto throw;
+ }
+ return;
+
+badfunc:
+ runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.__type_descriptor->__reflection, *finalizer.__type_descriptor->__reflection);
+throw:
+ runtime_throw("runtime.SetFinalizer");
+}