Diffstat (limited to 'gcc-4.7/libgo/go/runtime')
-rw-r--r--  gcc-4.7/libgo/go/runtime/append_test.go          52
-rw-r--r--  gcc-4.7/libgo/go/runtime/chan_test.go           382
-rw-r--r--  gcc-4.7/libgo/go/runtime/closure_test.go         53
-rw-r--r--  gcc-4.7/libgo/go/runtime/compiler.go             13
-rw-r--r--  gcc-4.7/libgo/go/runtime/debug.go               145
-rw-r--r--  gcc-4.7/libgo/go/runtime/debug/stack.go          91
-rw-r--r--  gcc-4.7/libgo/go/runtime/debug/stack_test.go     62
-rw-r--r--  gcc-4.7/libgo/go/runtime/error.go               107
-rw-r--r--  gcc-4.7/libgo/go/runtime/export_test.go          25
-rw-r--r--  gcc-4.7/libgo/go/runtime/extern.go              137
-rw-r--r--  gcc-4.7/libgo/go/runtime/gc_test.go              43
-rw-r--r--  gcc-4.7/libgo/go/runtime/malloc1.go              26
-rw-r--r--  gcc-4.7/libgo/go/runtime/mallocrand.go           93
-rw-r--r--  gcc-4.7/libgo/go/runtime/mallocrep.go            72
-rw-r--r--  gcc-4.7/libgo/go/runtime/mallocrep1.go          143
-rw-r--r--  gcc-4.7/libgo/go/runtime/mem.go                  71
-rw-r--r--  gcc-4.7/libgo/go/runtime/mfinal_test.go          64
-rw-r--r--  gcc-4.7/libgo/go/runtime/pprof/pprof.go         600
-rw-r--r--  gcc-4.7/libgo/go/runtime/pprof/pprof_test.go     85
-rw-r--r--  gcc-4.7/libgo/go/runtime/proc_test.go           125
-rw-r--r--  gcc-4.7/libgo/go/runtime/runtime_test.go         40
-rw-r--r--  gcc-4.7/libgo/go/runtime/softfloat64.go         498
-rw-r--r--  gcc-4.7/libgo/go/runtime/softfloat64_test.go    198
-rw-r--r--  gcc-4.7/libgo/go/runtime/symtab_test.go          55
-rw-r--r--  gcc-4.7/libgo/go/runtime/type.go                 55
25 files changed, 3235 insertions, 0 deletions
diff --git a/gcc-4.7/libgo/go/runtime/append_test.go b/gcc-4.7/libgo/go/runtime/append_test.go
new file mode 100644
index 000000000..b8552224e
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/append_test.go
@@ -0,0 +1,52 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+const N = 20
+
+func BenchmarkAppend(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ x = append(x, j)
+ }
+ }
+}
+
+func BenchmarkAppendSpecialCase(b *testing.B) {
+ b.StopTimer()
+ x := make([]int, 0, N)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ for j := 0; j < N; j++ {
+ if len(x) < cap(x) {
+ x = x[:len(x)+1]
+ x[len(x)-1] = j
+ } else {
+ x = append(x, j)
+ }
+ }
+ }
+}
+
+var x []int
+
+func f() int {
+ x[:1][0] = 3
+ return 2
+}
+
+func TestSideEffectOrder(t *testing.T) {
+ x = make([]int, 0, 10)
+ x = append(x, 1, f())
+ if x[0] != 1 || x[1] != 2 {
+ t.Error("append failed: ", x[0], x[1])
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/chan_test.go b/gcc-4.7/libgo/go/runtime/chan_test.go
new file mode 100644
index 000000000..eb2c7c60d
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/chan_test.go
@@ -0,0 +1,382 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+func TestChanSendInterface(t *testing.T) {
+ type mt struct{}
+ m := &mt{}
+ c := make(chan interface{}, 1)
+ c <- m
+ select {
+ case c <- m:
+ default:
+ }
+ select {
+ case c <- m:
+ case c <- &mt{}:
+ default:
+ }
+}
+
+func TestPseudoRandomSend(t *testing.T) {
+ n := 100
+ c := make(chan int)
+ l := make([]int, n)
+ var m sync.Mutex
+ m.Lock()
+ go func() {
+ for i := 0; i < n; i++ {
+ runtime.Gosched()
+ l[i] = <-c
+ }
+ m.Unlock()
+ }()
+ for i := 0; i < n; i++ {
+ select {
+ case c <- 0:
+ case c <- 1:
+ }
+ }
+ m.Lock() // wait
+ n0 := 0
+ n1 := 0
+ for _, i := range l {
+ n0 += (i + 1) % 2
+ n1 += i
+ if n0 > n/10 && n1 > n/10 {
+ return
+ }
+ }
+ t.Errorf("Want pseudo random, got %d zeros and %d ones", n0, n1)
+}
+
+func TestMultiConsumer(t *testing.T) {
+ const nwork = 23
+ const niter = 271828
+
+ pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}
+
+ q := make(chan int, nwork*3)
+ r := make(chan int, nwork*3)
+
+ // workers
+ var wg sync.WaitGroup
+ for i := 0; i < nwork; i++ {
+ wg.Add(1)
+ go func(w int) {
+ for v := range q {
+ // mess with the fifo-ish nature of range
+ if pn[w%len(pn)] == v {
+ runtime.Gosched()
+ }
+ r <- v
+ }
+ wg.Done()
+ }(i)
+ }
+
+ // feeder & closer
+ expect := 0
+ go func() {
+ for i := 0; i < niter; i++ {
+ v := pn[i%len(pn)]
+ expect += v
+ q <- v
+ }
+ close(q) // no more work
+ wg.Wait() // workers done
+ close(r) // ... so there can be no more results
+ }()
+
+ // consume & check
+ n := 0
+ s := 0
+ for v := range r {
+ n++
+ s += v
+ }
+ if n != niter || s != expect {
+ t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
+ expect, s, niter, n)
+ }
+}
+
+func BenchmarkSelectUncontended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ myc1 := make(chan int, 1)
+ myc2 := make(chan int, 1)
+ myc1 <- 0
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSelectContended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ myc1 := make(chan int, procs)
+ myc2 := make(chan int, procs)
+ for p := 0; p < procs; p++ {
+ myc1 <- 0
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSelectNonblock(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ myc1 := make(chan int)
+ myc2 := make(chan int)
+ myc3 := make(chan int, 1)
+ myc4 := make(chan int, 1)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ select {
+ case <-myc1:
+ default:
+ }
+ select {
+ case myc2 <- 0:
+ default:
+ }
+ select {
+ case <-myc3:
+ default:
+ }
+ select {
+ case myc4 <- 0:
+ default:
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanUncontended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ myc := make(chan int, CallsPerSched)
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc <- 0
+ }
+ for g := 0; g < CallsPerSched; g++ {
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanContended(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ myc := make(chan int, procs*CallsPerSched)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc <- 0
+ }
+ for g := 0; g < CallsPerSched; g++ {
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanSync(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := 2
+ N := int32(b.N / CallsPerSched / procs * procs)
+ c := make(chan bool, procs)
+ myc := make(chan int)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for {
+ i := atomic.AddInt32(&N, -1)
+ if i < 0 {
+ break
+ }
+ for g := 0; g < CallsPerSched; g++ {
+ if i%2 == 0 {
+ <-myc
+ myc <- 0
+ } else {
+ myc <- 0
+ <-myc
+ }
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, 2*procs)
+ myc := make(chan int, chanSize)
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 0
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ myc <- 1
+ }
+ }
+ myc <- 0
+ c <- foo == 42
+ }()
+ go func() {
+ foo := 0
+ for {
+ v := <-myc
+ if v == 0 {
+ break
+ }
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ c <- foo == 42
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ <-c
+ }
+}
+
+func BenchmarkChanProdCons0(b *testing.B) {
+ benchmarkChanProdCons(b, 0, 0)
+}
+
+func BenchmarkChanProdCons10(b *testing.B) {
+ benchmarkChanProdCons(b, 10, 0)
+}
+
+func BenchmarkChanProdCons100(b *testing.B) {
+ benchmarkChanProdCons(b, 100, 0)
+}
+
+func BenchmarkChanProdConsWork0(b *testing.B) {
+ benchmarkChanProdCons(b, 0, 100)
+}
+
+func BenchmarkChanProdConsWork10(b *testing.B) {
+ benchmarkChanProdCons(b, 10, 100)
+}
+
+func BenchmarkChanProdConsWork100(b *testing.B) {
+ benchmarkChanProdCons(b, 100, 100)
+}
+
+func BenchmarkChanCreation(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc := make(chan int, 1)
+ myc <- 0
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkChanSem(b *testing.B) {
+ type Empty struct{}
+ c := make(chan Empty, 1)
+ for i := 0; i < b.N; i++ {
+ c <- Empty{}
+ <-c
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/closure_test.go b/gcc-4.7/libgo/go/runtime/closure_test.go
new file mode 100644
index 000000000..ea65fbd5f
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/closure_test.go
@@ -0,0 +1,53 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package runtime_test
+
+import "testing"
+
+var s int
+
+func BenchmarkCallClosure(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s += func(ii int) int { return 2 * ii }(i)
+ }
+}
+
+func BenchmarkCallClosure1(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ j := i
+ s += func(ii int) int { return 2*ii + j }(i)
+ }
+}
+
+var ss *int
+
+func BenchmarkCallClosure2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ j := i
+ s += func() int {
+ ss = &j
+ return 2
+ }()
+ }
+}
+
+func addr1(x int) *int {
+ return func() *int { return &x }()
+}
+
+func BenchmarkCallClosure3(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ss = addr1(i)
+ }
+}
+
+func addr2() (x int, p *int) {
+ return 0, func() *int { return &x }()
+}
+
+func BenchmarkCallClosure4(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _, ss = addr2()
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/compiler.go b/gcc-4.7/libgo/go/runtime/compiler.go
new file mode 100644
index 000000000..0ed3b183f
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/compiler.go
@@ -0,0 +1,13 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Compiler is the name of the compiler toolchain that built the
+// running binary. Known toolchains are:
+//
+// gc The 5g/6g/8g compiler suite at code.google.com/p/go.
+// gccgo The gccgo front end, part of the GCC compiler suite.
+//
+const Compiler = "gccgo"
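A minimal usage sketch (illustrative, not from the patch): a program built by either toolchain can branch on the constant defined above.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// runtime.Compiler is "gccgo" for binaries built by the gccgo front end
	// and "gc" for binaries built by the standard gc toolchain.
	fmt.Println("built with:", runtime.Compiler)
}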
diff --git a/gcc-4.7/libgo/go/runtime/debug.go b/gcc-4.7/libgo/go/runtime/debug.go
new file mode 100644
index 000000000..b802fc63f
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/debug.go
@@ -0,0 +1,145 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Breakpoint() executes a breakpoint trap.
+func Breakpoint()
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// Until the calling goroutine exits or calls UnlockOSThread, it will always
+// execute in that thread, and no other goroutine can.
+func LockOSThread()
+
+// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
+// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
+func UnlockOSThread()
+
+// GOMAXPROCS sets the maximum number of CPUs that can be executing
+// simultaneously and returns the previous setting. If n < 1, it does not
+// change the current setting.
+// The number of logical CPUs on the local machine can be queried with NumCPU.
+// This call will go away when the scheduler improves.
+func GOMAXPROCS(n int) int
+
+// NumCPU returns the number of logical CPUs on the local machine.
+func NumCPU() int
+
+// NumCgoCall returns the number of cgo calls made by the current process.
+func NumCgoCall() int64
+
+// NumGoroutine returns the number of goroutines that currently exist.
+func NumGoroutine() int
+
+// MemProfileRate controls the fraction of memory allocations
+// that are recorded and reported in the memory profile.
+// The profiler aims to sample an average of
+// one allocation per MemProfileRate bytes allocated.
+//
+// To include every allocated block in the profile, set MemProfileRate to 1.
+// To turn off profiling entirely, set MemProfileRate to 0.
+//
+// The tools that process the memory profiles assume that the
+// profile rate is constant across the lifetime of the program
+// and equal to the current value. Programs that change the
+// memory profiling rate should do so just once, as early as
+// possible in the execution of the program (for example,
+// at the beginning of main).
+var MemProfileRate int = 512 * 1024
+
+// A MemProfileRecord describes the live objects allocated
+// by a particular call sequence (stack trace).
+type MemProfileRecord struct {
+ AllocBytes, FreeBytes int64 // number of bytes allocated, freed
+ AllocObjects, FreeObjects int64 // number of objects allocated, freed
+ Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
+func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
+
+// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
+func (r *MemProfileRecord) InUseObjects() int64 {
+ return r.AllocObjects - r.FreeObjects
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *MemProfileRecord) Stack() []uintptr {
+ for i, v := range r.Stack0 {
+ if v == 0 {
+ return r.Stack0[0:i]
+ }
+ }
+ return r.Stack0[0:]
+}
+
+// MemProfile returns n, the number of records in the current memory profile.
+// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
+// If len(p) < n, MemProfile does not change p and returns n, false.
+//
+// If inuseZero is true, the profile includes allocation records
+// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
+// These are sites where memory was allocated, but it has all
+// been released back to the runtime.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.memprofile flag instead
+// of calling MemProfile directly.
+func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
+
+// A StackRecord describes a single execution stack.
+type StackRecord struct {
+ Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *StackRecord) Stack() []uintptr {
+ for i, v := range r.Stack0 {
+ if v == 0 {
+ return r.Stack0[0:i]
+ }
+ }
+ return r.Stack0[0:]
+}
+
+// ThreadCreateProfile returns n, the number of records in the thread creation profile.
+// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
+// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package instead
+// of calling ThreadCreateProfile directly.
+func ThreadCreateProfile(p []StackRecord) (n int, ok bool)
+
+// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
+// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
+// If len(p) < n, GoroutineProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package instead
+// of calling GoroutineProfile directly.
+func GoroutineProfile(p []StackRecord) (n int, ok bool)
+
+// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
+// blocking until data is available. If profiling is turned off and all the profile
+// data accumulated while it was on has been returned, CPUProfile returns nil.
+// The caller must save the returned data before calling CPUProfile again.
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.cpuprofile flag instead of calling
+// CPUProfile directly.
+func CPUProfile() []byte
+
+// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
+// If hz <= 0, SetCPUProfileRate turns off profiling.
+// If the profiler is on, the rate cannot be changed without first turning it off.
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.cpuprofile flag instead of calling
+// SetCPUProfileRate directly.
+func SetCPUProfileRate(hz int)
+
+// Stack formats a stack trace of the calling goroutine into buf
+// and returns the number of bytes written to buf.
+// If all is true, Stack formats stack traces of all other goroutines
+// into buf after the trace for the current goroutine.
+func Stack(buf []byte, all bool) int
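A minimal sketch (illustrative, not from the patch) of the two-call size-then-copy pattern that the MemProfile, ThreadCreateProfile, and GoroutineProfile comments above describe, shown here for GoroutineProfile:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Ask for the record count first, then allocate a slightly larger
	// slice and copy; a real caller would loop if ok is still false.
	n, _ := runtime.GoroutineProfile(nil)
	p := make([]runtime.StackRecord, n+10)
	if n, ok := runtime.GoroutineProfile(p); ok {
		for _, r := range p[:n] {
			fmt.Println(r.Stack())
		}
	}
}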
diff --git a/gcc-4.7/libgo/go/runtime/debug/stack.go b/gcc-4.7/libgo/go/runtime/debug/stack.go
new file mode 100644
index 000000000..fc74e537b
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/debug/stack.go
@@ -0,0 +1,91 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package debug contains facilities for programs to debug themselves while
+// they are running.
+package debug
+
+import (
+ "bytes"
+ _ "debug/elf"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+)
+
+// PrintStack prints to standard error the stack trace returned by Stack.
+func PrintStack() {
+ os.Stderr.Write(stack())
+}
+
+// Stack returns a formatted stack trace of the goroutine that calls it.
+// For each routine, it includes the source line information and PC value,
+// then attempts to discover, for Go functions, the calling function or
+// method and the text of the line containing the invocation.
+func Stack() []byte {
+ return stack()
+}
+
+// stack implements Stack, skipping 2 frames
+func stack() []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := 2; ; i++ { // Caller we care about is the user, 2 frames up
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFile = file
+ }
+ line-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.Trim(lines[n], " \t")
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
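A short usage sketch for the exported helpers above (illustrative, not from the patch): dump a formatted trace from a deferred recover.

package main

import (
	"log"
	"runtime/debug"
)

func main() {
	defer func() {
		if err := recover(); err != nil {
			// Stack returns the formatted trace; PrintStack would write
			// it straight to standard error instead.
			log.Printf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	panic("boom") // illustrative failure
}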
diff --git a/gcc-4.7/libgo/go/runtime/debug/stack_test.go b/gcc-4.7/libgo/go/runtime/debug/stack_test.go
new file mode 100644
index 000000000..f33f5072b
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/debug/stack_test.go
@@ -0,0 +1,62 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "strings"
+ "testing"
+)
+
+type T int
+
+func (t *T) ptrmethod() []byte {
+ return Stack()
+}
+func (t T) method() []byte {
+ return t.ptrmethod()
+}
+
+/*
+ The traceback should look something like this, modulo line numbers and hex constants.
+ Don't worry much about the base levels, but check the ones in our own package.
+
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:15 (0x13878)
+ (*T).ptrmethod: return Stack()
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:18 (0x138dd)
+ T.method: return t.ptrmethod()
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:23 (0x13920)
+ TestStack: b := T(0).method()
+ /Users/r/go/src/pkg/testing/testing.go:132 (0x14a7a)
+ tRunner: test.F(t)
+ /Users/r/go/src/pkg/runtime/proc.c:145 (0xc970)
+ ???: runtime·unlock(&runtime·sched);
+*/
+func TestStack(t *testing.T) {
+ b := T(0).method()
+ lines := strings.Split(string(b), "\n")
+ if len(lines) <= 6 {
+ t.Fatal("too few lines")
+ }
+ n := 0
+ frame := func(line, code string) {
+ check(t, lines[n], line)
+ n++
+ // The source might not be available while running the test.
+ if strings.HasPrefix(lines[n], "\t") {
+ check(t, lines[n], code)
+ n++
+ }
+ }
+ frame("src/pkg/runtime/debug/stack_test.go", "\t(*T).ptrmethod: return Stack()")
+ frame("src/pkg/runtime/debug/stack_test.go", "\tT.method: return t.ptrmethod()")
+ frame("src/pkg/runtime/debug/stack_test.go", "\tTestStack: b := T(0).method()")
+ frame("src/pkg/testing/testing.go", "")
+}
+
+func check(t *testing.T, line, has string) {
+ if strings.Index(line, has) < 0 {
+ t.Errorf("expected %q in %q", has, line)
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/error.go b/gcc-4.7/libgo/go/runtime/error.go
new file mode 100644
index 000000000..d3913ec27
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/error.go
@@ -0,0 +1,107 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// The Error interface identifies a run time error.
+type Error interface {
+ error
+
+ // RuntimeError is a no-op function but
+ // serves to distinguish types that are runtime
+ // errors from ordinary errors: a type is a
+ // runtime error if it has a RuntimeError method.
+ RuntimeError()
+}
+
+// A TypeAssertionError explains a failed type assertion.
+type TypeAssertionError struct {
+ interfaceString string
+ concreteString string
+ assertedString string
+ missingMethod string // one method needed by Interface, missing from Concrete
+}
+
+func (*TypeAssertionError) RuntimeError() {}
+
+func (e *TypeAssertionError) Error() string {
+ inter := e.interfaceString
+ if inter == "" {
+ inter = "interface"
+ }
+ if e.concreteString == "" {
+ return "interface conversion: " + inter + " is nil, not " + e.assertedString
+ }
+ if e.missingMethod == "" {
+ return "interface conversion: " + inter + " is " + e.concreteString +
+ ", not " + e.assertedString
+ }
+ return "interface conversion: " + e.concreteString + " is not " + e.assertedString +
+ ": missing method " + e.missingMethod
+}
+
+// For calling from C.
+func NewTypeAssertionError(ps1, ps2, ps3 *string, pmeth *string, ret *interface{}) {
+ var s1, s2, s3, meth string
+
+ if ps1 != nil {
+ s1 = *ps1
+ }
+ if ps2 != nil {
+ s2 = *ps2
+ }
+ if ps3 != nil {
+ s3 = *ps3
+ }
+ if pmeth != nil {
+ meth = *pmeth
+ }
+ *ret = &TypeAssertionError{s1, s2, s3, meth}
+}
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) Error() string {
+ return "runtime error: " + string(e)
+}
+
+// For calling from C.
+func NewErrorString(s string, ret *interface{}) {
+ *ret = errorString(s)
+}
+
+type stringer interface {
+ String() string
+}
+
+func typestring(interface{}) string
+
+// For calling from C.
+// Prints an argument passed to panic.
+// There's room for arbitrary complexity here, but we keep it
+// simple and handle just a few important cases: int, string, and Stringer.
+func Printany(i interface{}) {
+ switch v := i.(type) {
+ case nil:
+ print("nil")
+ case stringer:
+ print(v.String())
+ case error:
+ print(v.Error())
+ case int:
+ print(v)
+ case string:
+ print(v)
+ default:
+ print("(", typestring(i), ") ", i)
+ }
+}
+
+// called from generated code
+func panicwrap(pkg, typ, meth string) {
+ panic("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer")
+}
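A minimal sketch (illustrative, not from the patch) of how a caller can use the Error interface above to distinguish runtime errors from other panic values:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	defer func() {
		if v := recover(); v != nil {
			// Index-out-of-range and failed type assertions panic with
			// values that implement runtime.Error.
			if re, ok := v.(runtime.Error); ok {
				fmt.Println("runtime error:", re.Error())
				return
			}
			fmt.Println("other panic:", v)
		}
	}()
	var s []int
	i := 3
	_ = s[i] // out of range, panics with a runtime.Error
}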
diff --git a/gcc-4.7/libgo/go/runtime/export_test.go b/gcc-4.7/libgo/go/runtime/export_test.go
new file mode 100644
index 000000000..c603e1b0d
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/export_test.go
@@ -0,0 +1,25 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing.
+
+package runtime
+
+var Fadd64 = fadd64
+var Fsub64 = fsub64
+var Fmul64 = fmul64
+var Fdiv64 = fdiv64
+var F64to32 = f64to32
+var F32to64 = f32to64
+var Fcmp64 = fcmp64
+var Fintto64 = fintto64
+var F64toint = f64toint
+
+func entersyscall()
+func exitsyscall()
+func golockedOSThread() bool
+
+var Entersyscall = entersyscall
+var Exitsyscall = exitsyscall
+var LockedOSThread = golockedOSThread
diff --git a/gcc-4.7/libgo/go/runtime/extern.go b/gcc-4.7/libgo/go/runtime/extern.go
new file mode 100644
index 000000000..09d1391d2
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/extern.go
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Package runtime contains operations that interact with Go's runtime system,
+ such as functions to control goroutines. It also includes the low-level type information
+ used by the reflect package; see reflect's documentation for the programmable
+ interface to the run-time type system.
+*/
+package runtime
+
+// Gosched yields the processor, allowing other goroutines to run. It does not
+// suspend the current goroutine, so execution resumes automatically.
+func Gosched()
+
+// Goexit terminates the goroutine that calls it. No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine.
+func Goexit()
+
+// Caller reports file and line number information about function invocations on
+// the calling goroutine's stack. The argument skip is the number of stack frames
+// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
+// meaning of skip differs between Caller and Callers.) The return values report the
+// program counter, file name, and line number within the file of the corresponding
+// call. The boolean ok is false if it was not possible to recover the information.
+func Caller(skip int) (pc uintptr, file string, line int, ok bool)
+
+// Callers fills the slice pc with the program counters of function invocations
+// on the calling goroutine's stack. The argument skip is the number of stack frames
+// to skip before recording in pc, with 0 identifying the frame for Callers itself and
+// 1 identifying the caller of Callers.
+// It returns the number of entries written to pc.
+func Callers(skip int, pc []uintptr) int
+
+type Func struct { // Keep in sync with runtime.h:struct Func
+ name string
+ entry uintptr // entry pc
+}
+
+// FuncForPC returns a *Func describing the function that contains the
+// given program counter address, or else nil.
+func FuncForPC(pc uintptr) *Func
+
+// Name returns the name of the function.
+func (f *Func) Name() string { return f.name }
+
+// Entry returns the entry address of the function.
+func (f *Func) Entry() uintptr { return f.entry }
+
+// FileLine returns the file name and line number of the
+// source code corresponding to the program counter pc.
+// The result will not be accurate if pc is not a program
+// counter within f.
+func (f *Func) FileLine(pc uintptr) (file string, line int) {
+ return funcline_go(f, pc)
+}
+
+// implemented in symtab.c
+func funcline_go(*Func, uintptr) (string, int)
+
+// A gccgo-specific hook to use debug info to get file/line info.
+func RegisterDebugLookup(func(pc uintptr, function *string, file *string, line *int) bool,
+ func(sym string, val *uintptr) bool)
+
+// mid returns the current os thread (m) id.
+func mid() uint32
+
+// SetFinalizer sets the finalizer associated with x to f.
+// When the garbage collector finds an unreachable block
+// with an associated finalizer, it clears the association and runs
+// f(x) in a separate goroutine. This makes x reachable again, but
+// now without an associated finalizer. Assuming that SetFinalizer
+// is not called again, the next time the garbage collector sees
+// that x is unreachable, it will free x.
+//
+// SetFinalizer(x, nil) clears any finalizer associated with x.
+//
+// The argument x must be a pointer to an object allocated by
+// calling new or by taking the address of a composite literal.
+// The argument f must be a function that takes a single argument
+// of x's type and can have arbitrary ignored return values.
+// If either of these is not true, SetFinalizer aborts the program.
+//
+// Finalizers are run in dependency order: if A points at B, both have
+// finalizers, and they are otherwise unreachable, only the finalizer
+// for A runs; once A is freed, the finalizer for B can run.
+// If a cyclic structure includes a block with a finalizer, that
+// cycle is not guaranteed to be garbage collected and the finalizer
+// is not guaranteed to run, because there is no ordering that
+// respects the dependencies.
+//
+// The finalizer for x is scheduled to run at some arbitrary time after
+// x becomes unreachable.
+// There is no guarantee that finalizers will run before a program exits,
+// so typically they are useful only for releasing non-memory resources
+// associated with an object during a long-running program.
+// For example, an os.File object could use a finalizer to close the
+// associated operating system file descriptor when a program discards
+// an os.File without calling Close, but it would be a mistake
+// to depend on a finalizer to flush an in-memory I/O buffer such as a
+// bufio.Writer, because the buffer would not be flushed at program exit.
+//
+// A single goroutine runs all finalizers for a program, sequentially.
+// If a finalizer must run for a long time, it should do so by starting
+// a new goroutine.
+func SetFinalizer(x, f interface{})
+
+func getgoroot() string
+
+// GOROOT returns the root of the Go tree.
+// It uses the GOROOT environment variable, if set,
+// or else the root used during the Go build.
+func GOROOT() string {
+ s := getgoroot()
+ if s != "" {
+ return s
+ }
+ return defaultGoroot
+}
+
+// Version returns the Go tree's version string.
+// It is either a sequence number or, when possible,
+// a release tag like "release.2010-03-04".
+// A trailing + indicates that the tree had local modifications
+// at the time of the build.
+func Version() string {
+ return theVersion
+}
+
+// GOOS is the running program's operating system target:
+// one of darwin, freebsd, linux, and so on.
+const GOOS string = theGoos
+
+// GOARCH is the running program's architecture target:
+// 386, amd64, or arm.
+const GOARCH string = theGoarch
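A small sketch of the Caller API documented above (illustrative, not from the patch); logHere is a hypothetical helper that reports its caller's file and line:

package main

import (
	"fmt"
	"runtime"
)

// logHere is hypothetical: skip=1 identifies the caller of logHere,
// matching the skip semantics described for Caller above.
func logHere(msg string) {
	if _, file, line, ok := runtime.Caller(1); ok {
		fmt.Printf("%s:%d: %s\n", file, line, msg)
		return
	}
	fmt.Println(msg)
}

func main() {
	logHere("hello")
}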
diff --git a/gcc-4.7/libgo/go/runtime/gc_test.go b/gcc-4.7/libgo/go/runtime/gc_test.go
new file mode 100644
index 000000000..7770e499a
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/gc_test.go
@@ -0,0 +1,43 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestGcSys(t *testing.T) {
+ memstats := new(runtime.MemStats)
+ runtime.GC()
+ runtime.ReadMemStats(memstats)
+ sys := memstats.Sys
+
+ runtime.MemProfileRate = 0 // disable profiler
+
+ itercount := 1000000
+ if testing.Short() {
+ itercount = 100000
+ }
+ for i := 0; i < itercount; i++ {
+ workthegc()
+ }
+
+ // Should only be using a few MB.
+ runtime.ReadMemStats(memstats)
+ if sys > memstats.Sys {
+ sys = 0
+ } else {
+ sys = memstats.Sys - sys
+ }
+ t.Logf("used %d extra bytes", sys)
+ if sys > 4<<20 {
+ t.Fatalf("using too much memory: %d bytes", sys)
+ }
+}
+
+func workthegc() []byte {
+ return make([]byte, 1029)
+}
diff --git a/gcc-4.7/libgo/go/runtime/malloc1.go b/gcc-4.7/libgo/go/runtime/malloc1.go
new file mode 100644
index 000000000..da92f4c2f
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/malloc1.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// trivial malloc test
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+)
+
+var chatty = flag.Bool("v", false, "chatty")
+
+func main() {
+ memstats := new(runtime.MemStats)
+ runtime.Free(runtime.Alloc(1))
+ runtime.ReadMemStats(memstats)
+ if *chatty {
+ fmt.Printf("%+v %v\n", memstats, uint64(0))
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/mallocrand.go b/gcc-4.7/libgo/go/runtime/mallocrand.go
new file mode 100644
index 000000000..f1bcb89cf
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/mallocrand.go
@@ -0,0 +1,93 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Random malloc test.
+
+package main
+
+import (
+ "flag"
+ "math/rand"
+ "runtime"
+ "unsafe"
+)
+
+var chatty = flag.Bool("v", false, "chatty")
+
+var footprint uint64
+var allocated uint64
+
+func bigger() {
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ if f := memstats.Sys; footprint < f {
+ footprint = f
+ if *chatty {
+ println("Footprint", footprint, " for ", allocated)
+ }
+ if footprint > 1e9 {
+ println("too big")
+ panic("fail")
+ }
+ }
+}
+
+// Prime the data structures by allocating one of
+// each block in order. After this, there should be
+// little reason to ask for more memory from the OS.
+func prime() {
+ for i := 0; i < 16; i++ {
+ b := runtime.Alloc(1 << uint(i))
+ runtime.Free(b)
+ }
+ for i := uintptr(0); i < 256; i++ {
+ b := runtime.Alloc(i << 12)
+ runtime.Free(b)
+ }
+}
+
+func memset(b *byte, c byte, n uintptr) {
+ np := uintptr(n)
+ for i := uintptr(0); i < np; i++ {
+ *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(b)) + i)) = c
+ }
+}
+
+func main() {
+ flag.Parse()
+ // prime()
+ var blocks [1]struct {
+ base *byte
+ siz uintptr
+ }
+ for i := 0; i < 1<<10; i++ {
+ if i%(1<<10) == 0 && *chatty {
+ println(i)
+ }
+ b := rand.Int() % len(blocks)
+ if blocks[b].base != nil {
+ // println("Free", blocks[b].siz, blocks[b].base)
+ runtime.Free(blocks[b].base)
+ blocks[b].base = nil
+ allocated -= uint64(blocks[b].siz)
+ continue
+ }
+ siz := uintptr(rand.Int() >> (11 + rand.Uint32()%20))
+ base := runtime.Alloc(siz)
+ // ptr := uintptr(syscall.BytePtr(base))+uintptr(siz/2)
+ // obj, size, ref, ok := allocator.find(ptr)
+ // if obj != base || *ref != 0 || !ok {
+ // println("find", siz, obj, ref, ok)
+ // panic("fail")
+ // }
+ blocks[b].base = base
+ blocks[b].siz = siz
+ allocated += uint64(siz)
+ // println("Alloc", siz, base)
+ memset(base, 0xbb, siz)
+ bigger()
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/mallocrep.go b/gcc-4.7/libgo/go/runtime/mallocrep.go
new file mode 100644
index 000000000..03ee71edb
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/mallocrep.go
@@ -0,0 +1,72 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Repeated malloc test.
+
+// +build ignore
+
+package main
+
+import (
+ "flag"
+ "runtime"
+)
+
+var chatty = flag.Bool("v", false, "chatty")
+
+var oldsys uint64
+var memstats runtime.MemStats
+
+func bigger() {
+ st := &memstats
+ runtime.ReadMemStats(st)
+ if oldsys < st.Sys {
+ oldsys = st.Sys
+ if *chatty {
+ println(st.Sys, " system bytes for ", st.Alloc, " Go bytes")
+ }
+ if st.Sys > 1e9 {
+ println("too big")
+ panic("fail")
+ }
+ }
+}
+
+func main() {
+ runtime.GC() // clean up garbage from init
+ runtime.ReadMemStats(&memstats) // first call can do some allocations
+ runtime.MemProfileRate = 0 // disable profiler
+ stacks := memstats.Alloc // ignore stacks
+ flag.Parse()
+ for i := 0; i < 1<<7; i++ {
+ for j := 1; j <= 1<<22; j <<= 1 {
+ if i == 0 && *chatty {
+ println("First alloc:", j)
+ }
+ if a := memstats.Alloc - stacks; a != 0 {
+ println("no allocations but stats report", a, "bytes allocated")
+ panic("fail")
+ }
+ b := runtime.Alloc(uintptr(j))
+ runtime.ReadMemStats(&memstats)
+ during := memstats.Alloc - stacks
+ runtime.Free(b)
+ runtime.ReadMemStats(&memstats)
+ if a := memstats.Alloc - stacks; a != 0 {
+ println("allocated ", j, ": wrong stats: during=", during, " after=", a, " (want 0)")
+ panic("fail")
+ }
+ bigger()
+ }
+ if i%(1<<10) == 0 && *chatty {
+ println(i)
+ }
+ if i == 0 {
+ if *chatty {
+ println("Primed", i)
+ }
+ // runtime.frozen = true
+ }
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/mallocrep1.go b/gcc-4.7/libgo/go/runtime/mallocrep1.go
new file mode 100644
index 000000000..41c104c0b
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/mallocrep1.go
@@ -0,0 +1,143 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Repeated malloc test.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "strconv"
+)
+
+var chatty = flag.Bool("v", false, "chatty")
+var reverse = flag.Bool("r", false, "reverse")
+var longtest = flag.Bool("l", false, "long test")
+
+var b []*byte
+var stats = new(runtime.MemStats)
+
+func OkAmount(size, n uintptr) bool {
+ if n < size {
+ return false
+ }
+ if size < 16*8 {
+ if n > size+16 {
+ return false
+ }
+ } else {
+ if n > size*9/8 {
+ return false
+ }
+ }
+ return true
+}
+
+func AllocAndFree(size, count int) {
+ if *chatty {
+ fmt.Printf("size=%d count=%d ...\n", size, count)
+ }
+ runtime.ReadMemStats(stats)
+ n1 := stats.Alloc
+ for i := 0; i < count; i++ {
+ b[i] = runtime.Alloc(uintptr(size))
+ base, n := runtime.Lookup(b[i])
+ if base != b[i] || !OkAmount(uintptr(size), n) {
+ println("lookup failed: got", base, n, "for", b[i])
+ panic("fail")
+ }
+ runtime.ReadMemStats(stats)
+ if stats.Sys > 1e9 {
+ println("too much memory allocated")
+ panic("fail")
+ }
+ }
+ runtime.ReadMemStats(stats)
+ n2 := stats.Alloc
+ if *chatty {
+ fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats)
+ }
+ n3 := stats.Alloc
+ for j := 0; j < count; j++ {
+ i := j
+ if *reverse {
+ i = count - 1 - j
+ }
+ alloc := uintptr(stats.Alloc)
+ base, n := runtime.Lookup(b[i])
+ if base != b[i] || !OkAmount(uintptr(size), n) {
+ println("lookup failed: got", base, n, "for", b[i])
+ panic("fail")
+ }
+ runtime.Free(b[i])
+ runtime.ReadMemStats(stats)
+ if stats.Alloc != uint64(alloc-n) {
+ println("free alloc got", stats.Alloc, "expected", alloc-n, "after free of", n)
+ panic("fail")
+ }
+ if stats.Sys > 1e9 {
+ println("too much memory allocated")
+ panic("fail")
+ }
+ }
+ runtime.ReadMemStats(stats)
+ n4 := stats.Alloc
+
+ if *chatty {
+ fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats)
+ }
+ if n2-n1 != n3-n4 {
+ println("wrong alloc count: ", n2-n1, n3-n4)
+ panic("fail")
+ }
+}
+
+func atoi(s string) int {
+ i, _ := strconv.Atoi(s)
+ return i
+}
+
+func main() {
+ runtime.MemProfileRate = 0 // disable profiler
+ flag.Parse()
+ b = make([]*byte, 10000)
+ if flag.NArg() > 0 {
+ AllocAndFree(atoi(flag.Arg(0)), atoi(flag.Arg(1)))
+ return
+ }
+ maxb := 1 << 22
+ if !*longtest {
+ maxb = 1 << 19
+ }
+ for j := 1; j <= maxb; j <<= 1 {
+ n := len(b)
+ max := uintptr(1 << 28)
+ if !*longtest {
+ max = uintptr(maxb)
+ }
+ if uintptr(j)*uintptr(n) > max {
+ n = int(max / uintptr(j))
+ }
+ if n < 10 {
+ n = 10
+ }
+ for m := 1; m <= n; {
+ AllocAndFree(j, m)
+ if m == n {
+ break
+ }
+ m = 5 * m / 4
+ if m < 4 {
+ m++
+ }
+ if m > n {
+ m = n
+ }
+ }
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/mem.go b/gcc-4.7/libgo/go/runtime/mem.go
new file mode 100644
index 000000000..95e8aa7a5
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/mem.go
@@ -0,0 +1,71 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// A MemStats records statistics about the memory allocator.
+type MemStats struct {
+ // General statistics.
+ Alloc uint64 // bytes allocated and still in use
+ TotalAlloc uint64 // bytes allocated (even if freed)
+ Sys uint64 // bytes obtained from system (should be sum of XxxSys below)
+ Lookups uint64 // number of pointer lookups
+ Mallocs uint64 // number of mallocs
+ Frees uint64 // number of frees
+
+ // Main allocation heap statistics.
+ HeapAlloc uint64 // bytes allocated and still in use
+ HeapSys uint64 // bytes obtained from system
+ HeapIdle uint64 // bytes in idle spans
+ HeapInuse uint64 // bytes in non-idle span
+ HeapReleased uint64 // bytes released to the OS
+ HeapObjects uint64 // total number of allocated objects
+
+ // Low-level fixed-size structure allocator statistics.
+ // Inuse is bytes used now.
+ // Sys is bytes obtained from system.
+ StackInuse uint64 // bootstrap stacks
+ StackSys uint64
+ MSpanInuse uint64 // mspan structures
+ MSpanSys uint64
+ MCacheInuse uint64 // mcache structures
+ MCacheSys uint64
+ BuckHashSys uint64 // profiling bucket hash table
+
+ // Garbage collector statistics.
+ NextGC uint64 // next run in HeapAlloc time (bytes)
+ LastGC uint64 // last run in absolute time (ns)
+ PauseTotalNs uint64
+ PauseNs [256]uint64 // most recent GC pause times
+ NumGC uint32
+ EnableGC bool
+ DebugGC bool
+
+ // Per-size allocation statistics.
+ // 61 is NumSizeClasses in the C code.
+ BySize [61]struct {
+ Size uint32
+ Mallocs uint64
+ Frees uint64
+ }
+}
+
+var Sizeof_C_MStats uintptr // filled in by malloc.goc
+
+var VmemStats MemStats
+
+func init() {
+ if Sizeof_C_MStats != unsafe.Sizeof(VmemStats) {
+ println(Sizeof_C_MStats, unsafe.Sizeof(VmemStats))
+ panic("MStats vs MemStatsType size mismatch")
+ }
+}
+
+// ReadMemStats populates m with memory allocator statistics.
+func ReadMemStats(m *MemStats)
+
+// GC runs a garbage collection.
+func GC()
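A minimal sketch (illustrative, not from the patch) of reading the statistics declared above; the fields printed are an arbitrary selection:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("alloc=%d bytes sys=%d bytes numgc=%d\n", m.Alloc, m.Sys, m.NumGC)
}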
diff --git a/gcc-4.7/libgo/go/runtime/mfinal_test.go b/gcc-4.7/libgo/go/runtime/mfinal_test.go
new file mode 100644
index 000000000..de632717a
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/mfinal_test.go
@@ -0,0 +1,64 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+func fin(v *int) {
+}
+
+func BenchmarkFinalizer(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ var wg sync.WaitGroup
+ wg.Add(procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ var data [CallsPerSched]*int
+ for i := 0; i < CallsPerSched; i++ {
+ data[i] = new(int)
+ }
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for i := 0; i < CallsPerSched; i++ {
+ runtime.SetFinalizer(data[i], fin)
+ }
+ for i := 0; i < CallsPerSched; i++ {
+ runtime.SetFinalizer(data[i], nil)
+ }
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkFinalizerRun(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ var wg sync.WaitGroup
+ wg.Add(procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for i := 0; i < CallsPerSched; i++ {
+ v := new(int)
+ runtime.SetFinalizer(v, fin)
+ }
+ runtime.GC()
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
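The benchmarks above exercise SetFinalizer as documented in extern.go; a tiny usage sketch (illustrative, not from the patch; the type is made up, and the finalizer is not guaranteed to run before the program exits):

package main

import (
	"fmt"
	"runtime"
)

type resource struct{ fd int }

func main() {
	r := &resource{fd: 3}
	runtime.SetFinalizer(r, func(r *resource) {
		// Runs at some arbitrary time after r becomes unreachable, if at all.
		fmt.Println("finalizing fd", r.fd)
	})
	r = nil
	runtime.GC()
	runtime.Gosched() // give the finalizer goroutine a chance to run
}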
diff --git a/gcc-4.7/libgo/go/runtime/pprof/pprof.go b/gcc-4.7/libgo/go/runtime/pprof/pprof.go
new file mode 100644
index 000000000..87f17d2db
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/pprof/pprof.go
@@ -0,0 +1,600 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pprof writes runtime profiling data in the format expected
+// by the pprof visualization tool.
+// For more information about pprof, see
+// http://code.google.com/p/google-perftools/.
+package pprof
+
+import (
+ "bufio"
+ "bytes"
+ _ "debug/elf"
+ "fmt"
+ "io"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "text/tabwriter"
+)
+
+// BUG(rsc): A bug in the OS X Snow Leopard 64-bit kernel prevents
+// CPU profiling from giving accurate results on that system.
+
+// A Profile is a collection of stack traces showing the call sequences
+// that led to instances of a particular event, such as allocation.
+// Packages can create and maintain their own profiles; the most common
+// use is for tracking resources that must be explicitly closed, such as files
+// or network connections.
+//
+// A Profile's methods can be called from multiple goroutines simultaneously.
+//
+// Each Profile has a unique name. A few profiles are predefined:
+//
+// goroutine - stack traces of all current goroutines
+// heap - a sampling of all heap allocations
+// threadcreate - stack traces that led to the creation of new OS threads
+//
+// These predefined profiles maintain themselves and panic on an explicit
+// Add or Remove method call.
+//
+// The CPU profile is not available as a Profile. It has a special API,
+// the StartCPUProfile and StopCPUProfile functions, because it streams
+// output to a writer during profiling.
+//
+type Profile struct {
+ name string
+ mu sync.Mutex
+ m map[interface{}][]uintptr
+ count func() int
+ write func(io.Writer, int) error
+}
+
+// profiles records all registered profiles.
+var profiles struct {
+ mu sync.Mutex
+ m map[string]*Profile
+}
+
+var goroutineProfile = &Profile{
+ name: "goroutine",
+ count: countGoroutine,
+ write: writeGoroutine,
+}
+
+var threadcreateProfile = &Profile{
+ name: "threadcreate",
+ count: countThreadCreate,
+ write: writeThreadCreate,
+}
+
+var heapProfile = &Profile{
+ name: "heap",
+ count: countHeap,
+ write: writeHeap,
+}
+
+func lockProfiles() {
+ profiles.mu.Lock()
+ if profiles.m == nil {
+ // Initial built-in profiles.
+ profiles.m = map[string]*Profile{
+ "goroutine": goroutineProfile,
+ "threadcreate": threadcreateProfile,
+ "heap": heapProfile,
+ }
+ }
+}
+
+func unlockProfiles() {
+ profiles.mu.Unlock()
+}
+
+// NewProfile creates a new profile with the given name.
+// If a profile with that name already exists, NewProfile panics.
+// The convention is to use an 'import/path.' prefix to create
+// separate name spaces for each package.
+func NewProfile(name string) *Profile {
+ lockProfiles()
+ defer unlockProfiles()
+ if name == "" {
+ panic("pprof: NewProfile with empty name")
+ }
+ if profiles.m[name] != nil {
+ panic("pprof: NewProfile name already in use: " + name)
+ }
+ p := &Profile{
+ name: name,
+ m: map[interface{}][]uintptr{},
+ }
+ profiles.m[name] = p
+ return p
+}
+
+// Lookup returns the profile with the given name, or nil if no such profile exists.
+func Lookup(name string) *Profile {
+ lockProfiles()
+ defer unlockProfiles()
+ return profiles.m[name]
+}
+
+// Profiles returns a slice of all the known profiles, sorted by name.
+func Profiles() []*Profile {
+ lockProfiles()
+ defer unlockProfiles()
+
+ var all []*Profile
+ for _, p := range profiles.m {
+ all = append(all, p)
+ }
+
+ sort.Sort(byName(all))
+ return all
+}
+
+type byName []*Profile
+
+func (x byName) Len() int { return len(x) }
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byName) Less(i, j int) bool { return x[i].name < x[j].name }
+
+// Name returns this profile's name, which can be passed to Lookup to reobtain the profile.
+func (p *Profile) Name() string {
+ return p.name
+}
+
+// Count returns the number of execution stacks currently in the profile.
+func (p *Profile) Count() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.count != nil {
+ return p.count()
+ }
+ return len(p.m)
+}
+
+// Add adds the current execution stack to the profile, associated with value.
+// Add stores value in an internal map, so value must be suitable for use as
+// a map key and will not be garbage collected until the corresponding
+// call to Remove. Add panics if the profile already contains a stack for value.
+//
+// The skip parameter has the same meaning as runtime.Caller's skip
+// and controls where the stack trace begins. Passing skip=0 begins the
+// trace in the function calling Add. For example, given this
+// execution stack:
+//
+// Add
+// called from rpc.NewClient
+// called from mypkg.Run
+// called from main.main
+//
+// Passing skip=0 begins the stack trace at the call to Add inside rpc.NewClient.
+// Passing skip=1 begins the stack trace at the call to NewClient inside mypkg.Run.
+//
+func (p *Profile) Add(value interface{}, skip int) {
+ if p.name == "" {
+ panic("pprof: use of uninitialized Profile")
+ }
+ if p.write != nil {
+ panic("pprof: Add called on built-in Profile " + p.name)
+ }
+
+ stk := make([]uintptr, 32)
+ n := runtime.Callers(skip+1, stk[:])
+
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.m[value] != nil {
+ panic("pprof: Profile.Add of duplicate value")
+ }
+ p.m[value] = stk[:n]
+}
+
+// Remove removes the execution stack associated with value from the profile.
+// It is a no-op if the value is not in the profile.
+func (p *Profile) Remove(value interface{}) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ delete(p.m, value)
+}
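A brief sketch of the custom-profile API described above (illustrative, not from the patch; the profile name and helpers are made up), tracking open files with Add and Remove:

package main

import (
	"os"
	"runtime/pprof"
)

// fdProfile follows the 'import/path.' naming convention mentioned above.
var fdProfile = pprof.NewProfile("example.org/fds")

func open(name string) (*os.File, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	fdProfile.Add(f, 1) // skip=1: record the stack starting at open's caller
	return f, nil
}

func closeFile(f *os.File) error {
	fdProfile.Remove(f)
	return f.Close()
}

func main() {
	if f, err := open("/etc/hosts"); err == nil {
		closeFile(f)
	}
}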
+
+// WriteTo writes a pprof-formatted snapshot of the profile to w.
+// If a write to w returns an error, WriteTo returns that error.
+// Otherwise, WriteTo returns nil.
+//
+// The debug parameter enables additional output.
+// Passing debug=0 prints only the hexadecimal addresses that pprof needs.
+// Passing debug=1 adds comments translating addresses to function names
+// and line numbers, so that a programmer can read the profile without tools.
+//
+// The predefined profiles may assign meaning to other debug values;
+// for example, when printing the "goroutine" profile, debug=2 means to
+// print the goroutine stacks in the same form that a Go program uses
+// when dying due to an unrecovered panic.
+func (p *Profile) WriteTo(w io.Writer, debug int) error {
+ if p.name == "" {
+ panic("pprof: use of zero Profile")
+ }
+ if p.write != nil {
+ return p.write(w, debug)
+ }
+
+ // Obtain consistent snapshot under lock; then process without lock.
+ var all [][]uintptr
+ p.mu.Lock()
+ for _, stk := range p.m {
+ all = append(all, stk)
+ }
+ p.mu.Unlock()
+
+ // Map order is non-deterministic; make output deterministic.
+ sort.Sort(stackProfile(all))
+
+ return printCountProfile(w, debug, p.name, stackProfile(all))
+}
+
+type stackProfile [][]uintptr
+
+func (x stackProfile) Len() int { return len(x) }
+func (x stackProfile) Stack(i int) []uintptr { return x[i] }
+func (x stackProfile) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x stackProfile) Less(i, j int) bool {
+ t, u := x[i], x[j]
+ for k := 0; k < len(t) && k < len(u); k++ {
+ if t[k] != u[k] {
+ return t[k] < u[k]
+ }
+ }
+ return len(t) < len(u)
+}
+
+// A countProfile is a set of stack traces to be printed as counts
+// grouped by stack trace. There are multiple implementations:
+// all that matters is that we can find out how many traces there are
+// and obtain each trace in turn.
+type countProfile interface {
+ Len() int
+ Stack(i int) []uintptr
+}
+
+// printCountProfile prints a countProfile at the specified debug level.
+func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
+ b := bufio.NewWriter(w)
+ var tw *tabwriter.Writer
+ w = b
+ if debug > 0 {
+ tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ w = tw
+ }
+
+ fmt.Fprintf(w, "%s profile: total %d\n", name, p.Len())
+
+ // Build count of each stack.
+ var buf bytes.Buffer
+ key := func(stk []uintptr) string {
+ buf.Reset()
+ fmt.Fprintf(&buf, "@")
+ for _, pc := range stk {
+ fmt.Fprintf(&buf, " %#x", pc)
+ }
+ return buf.String()
+ }
+ m := map[string]int{}
+ n := p.Len()
+ for i := 0; i < n; i++ {
+ m[key(p.Stack(i))]++
+ }
+
+ // Print stacks, listing count on first occurrence of a unique stack.
+ for i := 0; i < n; i++ {
+ stk := p.Stack(i)
+ s := key(stk)
+ if count := m[s]; count != 0 {
+ fmt.Fprintf(w, "%d %s\n", count, s)
+ if debug > 0 {
+ printStackRecord(w, stk, false)
+ }
+ delete(m, s)
+ }
+ }
+
+ if tw != nil {
+ tw.Flush()
+ }
+ return b.Flush()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
+ show := allFrames
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ show = true
+ fmt.Fprintf(w, "#\t%#x\n", pc)
+ } else {
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ // This is useful mainly for allocation traces.
+ if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ show = true
+ fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", pc, f.Name(), pc-f.Entry(), file, line)
+ }
+ }
+ if !show {
+ // We didn't print anything; do it again,
+ // and this time include runtime functions.
+ printStackRecord(w, stk, true)
+ return
+ }
+ fmt.Fprintf(w, "\n")
+}
+
+// Interface to system profiles.
+
+type byInUseBytes []runtime.MemProfileRecord
+
+func (x byInUseBytes) Len() int { return len(x) }
+func (x byInUseBytes) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byInUseBytes) Less(i, j int) bool { return x[i].InUseBytes() > x[j].InUseBytes() }
+
+// WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
+// It is preserved for backwards compatibility.
+func WriteHeapProfile(w io.Writer) error {
+ return writeHeap(w, 0)
+}
+
+// countHeap returns the number of records in the heap profile.
+func countHeap() int {
+ n, _ := runtime.MemProfile(nil, false)
+ return n
+}
+
+// writeHeapProfile writes the current runtime heap profile to w.
+func writeHeap(w io.Writer, debug int) error {
+ // Find out how many records there are (MemProfile(nil, false)),
+ // allocate that many records, and get the data.
+ // There's a race—more records might be added between
+ // the two calls—so allocate a few extra records for safety
+ // and also try again if we're very unlucky.
+ // The loop should only execute one iteration in the common case.
+ var p []runtime.MemProfileRecord
+ n, ok := runtime.MemProfile(nil, false)
+ for {
+ // Allocate room for a slightly bigger profile,
+ // in case a few more entries have been added
+ // since the call to MemProfile.
+ p = make([]runtime.MemProfileRecord, n+50)
+ n, ok = runtime.MemProfile(p, false)
+ if ok {
+ p = p[0:n]
+ break
+ }
+ // Profile grew; try again.
+ }
+
+ sort.Sort(byInUseBytes(p))
+
+ b := bufio.NewWriter(w)
+ var tw *tabwriter.Writer
+ w = b
+ if debug > 0 {
+ tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ w = tw
+ }
+
+ var total runtime.MemProfileRecord
+ for i := range p {
+ r := &p[i]
+ total.AllocBytes += r.AllocBytes
+ total.AllocObjects += r.AllocObjects
+ total.FreeBytes += r.FreeBytes
+ total.FreeObjects += r.FreeObjects
+ }
+
+ // Technically the rate is MemProfileRate not 2*MemProfileRate,
+ // but early versions of the C++ heap profiler reported 2*MemProfileRate,
+ // so that's what pprof has come to expect.
+ fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
+ total.InUseObjects(), total.InUseBytes(),
+ total.AllocObjects, total.AllocBytes,
+ 2*runtime.MemProfileRate)
+
+ for i := range p {
+ r := &p[i]
+ fmt.Fprintf(w, "%d: %d [%d: %d] @",
+ r.InUseObjects(), r.InUseBytes(),
+ r.AllocObjects, r.AllocBytes)
+ for _, pc := range r.Stack() {
+ fmt.Fprintf(w, " %#x", pc)
+ }
+ fmt.Fprintf(w, "\n")
+ if debug > 0 {
+ printStackRecord(w, r.Stack(), false)
+ }
+ }
+
+	// Print memstats information too.
+	// pprof will ignore it, but it's useful for people reading the profile.
+ if debug > 0 {
+ s := new(runtime.MemStats)
+ runtime.ReadMemStats(s)
+ fmt.Fprintf(w, "\n# runtime.MemStats\n")
+ fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
+ fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
+ fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
+ fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
+ fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
+
+ fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
+ fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
+ fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
+ fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
+
+ fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
+ fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
+ fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
+ fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
+
+ fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
+ fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
+ fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
+ fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
+ fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
+ }
+
+ if tw != nil {
+ tw.Flush()
+ }
+ return b.Flush()
+}
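
With debug == 0, writeHeap emits the plain-text format that pprof's legacy heap parser expects: a header line built from the totals, then one line per record followed by its program counters. An illustrative fragment with made-up addresses and sizes, consistent with the format strings above and the default MemProfileRate of 512 KB:

heap profile: 3: 12288 [5: 20480] @ heap/1048576
1: 8192 [1: 8192] @ 0x401234 0x402345 0x403456
2: 4096 [4: 12288] @ 0x401234 0x405678 0x403456
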
+
+// countThreadCreate returns the size of the current ThreadCreateProfile.
+func countThreadCreate() int {
+ n, _ := runtime.ThreadCreateProfile(nil)
+ return n
+}
+
+// writeThreadCreate writes the current runtime ThreadCreateProfile to w.
+func writeThreadCreate(w io.Writer, debug int) error {
+ return writeRuntimeProfile(w, debug, "threadcreate", runtime.ThreadCreateProfile)
+}
+
+// countGoroutine returns the number of goroutines.
+func countGoroutine() int {
+ return runtime.NumGoroutine()
+}
+
+// writeGoroutine writes the current runtime GoroutineProfile to w.
+func writeGoroutine(w io.Writer, debug int) error {
+ if debug >= 2 {
+ return writeGoroutineStacks(w)
+ }
+ return writeRuntimeProfile(w, debug, "goroutine", runtime.GoroutineProfile)
+}
+
+func writeGoroutineStacks(w io.Writer) error {
+ // We don't know how big the buffer needs to be to collect
+ // all the goroutines. Start with 1 MB and try a few times, doubling each time.
+ // Give up and use a truncated trace if 64 MB is not enough.
+ buf := make([]byte, 1<<20)
+ for i := 0; ; i++ {
+ n := runtime.Stack(buf, true)
+ if n < len(buf) {
+ buf = buf[:n]
+ break
+ }
+ if len(buf) >= 64<<20 {
+ // Filled 64 MB - stop there.
+ break
+ }
+ buf = make([]byte, 2*len(buf))
+ }
+ _, err := w.Write(buf)
+ return err
+}
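
Outside this file, the debug >= 2 path is reached through the named "goroutine" profile. A minimal sketch, relying on the Profile/Lookup API defined earlier in pprof.go, that dumps every goroutine's stack to standard error:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// Debug level 2 prints full stacks via writeGoroutineStacks.
	pprof.Lookup("goroutine").WriteTo(os.Stderr, 2)
}
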
+
+func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runtime.StackRecord) (int, bool)) error {
+ // Find out how many records there are (fetch(nil)),
+ // allocate that many records, and get the data.
+ // There's a race—more records might be added between
+ // the two calls—so allocate a few extra records for safety
+ // and also try again if we're very unlucky.
+ // The loop should only execute one iteration in the common case.
+ var p []runtime.StackRecord
+ n, ok := fetch(nil)
+ for {
+ // Allocate room for a slightly bigger profile,
+ // in case a few more entries have been added
+		// since the call to fetch(nil).
+ p = make([]runtime.StackRecord, n+10)
+ n, ok = fetch(p)
+ if ok {
+ p = p[0:n]
+ break
+ }
+ // Profile grew; try again.
+ }
+
+ return printCountProfile(w, debug, name, runtimeProfile(p))
+}
+
+type runtimeProfile []runtime.StackRecord
+
+func (p runtimeProfile) Len() int { return len(p) }
+func (p runtimeProfile) Stack(i int) []uintptr { return p[i].Stack() }
+
+var cpu struct {
+ sync.Mutex
+ profiling bool
+ done chan bool
+}
+
+// StartCPUProfile enables CPU profiling for the current process.
+// While profiling, the profile will be buffered and written to w.
+// StartCPUProfile returns an error if profiling is already enabled.
+func StartCPUProfile(w io.Writer) error {
+ // The runtime routines allow a variable profiling rate,
+ // but in practice operating systems cannot trigger signals
+ // at more than about 500 Hz, and our processing of the
+ // signal is not cheap (mostly getting the stack trace).
+ // 100 Hz is a reasonable choice: it is frequent enough to
+ // produce useful data, rare enough not to bog down the
+ // system, and a nice round number to make it easy to
+ // convert sample counts to seconds. Instead of requiring
+ // each client to specify the frequency, we hard code it.
+ const hz = 100
+
+ // Avoid queueing behind StopCPUProfile.
+ // Could use TryLock instead if we had it.
+ if cpu.profiling {
+ return fmt.Errorf("cpu profiling already in use")
+ }
+
+ cpu.Lock()
+ defer cpu.Unlock()
+ if cpu.done == nil {
+ cpu.done = make(chan bool)
+ }
+ // Double-check.
+ if cpu.profiling {
+ return fmt.Errorf("cpu profiling already in use")
+ }
+ cpu.profiling = true
+ runtime.SetCPUProfileRate(hz)
+ go profileWriter(w)
+ return nil
+}
+
+func profileWriter(w io.Writer) {
+ for {
+ data := runtime.CPUProfile()
+ if data == nil {
+ break
+ }
+ w.Write(data)
+ }
+ cpu.done <- true
+}
+
+// StopCPUProfile stops the current CPU profile, if any.
+// StopCPUProfile only returns after all the writes for the
+// profile have completed.
+func StopCPUProfile() {
+ cpu.Lock()
+ defer cpu.Unlock()
+
+ if !cpu.profiling {
+ return
+ }
+ cpu.profiling = false
+ runtime.SetCPUProfileRate(0)
+ <-cpu.done
+}
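
Taken together, StartCPUProfile/StopCPUProfile and WriteHeapProfile are the package's main entry points. A minimal usage sketch; the output file names and the doWork workload are illustrative, not part of the package:

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	// CPU profile: start before the workload, stop (and flush) afterwards.
	cpuOut, err := os.Create("cpu.prof") // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	if err := pprof.StartCPUProfile(cpuOut); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	doWork()

	// Heap profile: a one-shot snapshot written on demand.
	heapOut, err := os.Create("heap.prof") // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	defer heapOut.Close()
	if err := pprof.WriteHeapProfile(heapOut); err != nil {
		log.Fatal(err)
	}
}

// doWork stands in for the code being profiled.
func doWork() {
	s := 0
	for i := 0; i < 10000000; i++ {
		s += i
	}
	_ = s
}

The resulting files are in the formats the pprof tool expects.
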
diff --git a/gcc-4.7/libgo/go/runtime/pprof/pprof_test.go b/gcc-4.7/libgo/go/runtime/pprof/pprof_test.go
new file mode 100644
index 000000000..e933058e5
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/pprof/pprof_test.go
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof_test
+
+import (
+ "bytes"
+ "hash/crc32"
+ "os/exec"
+ "runtime"
+ . "runtime/pprof"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
+func TestCPUProfile(t *testing.T) {
+ switch runtime.GOOS {
+ case "darwin":
+ out, err := exec.Command("uname", "-a").CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ vers := string(out)
+ t.Logf("uname -a: %v", vers)
+ // Lion uses "Darwin Kernel Version 11".
+ if strings.Contains(vers, "Darwin Kernel Version 10") && strings.Contains(vers, "RELEASE_X86_64") {
+ t.Logf("skipping test on known-broken kernel (64-bit Leopard / Snow Leopard)")
+ return
+ }
+ case "plan9":
+ // unimplemented
+ return
+ }
+
+ buf := make([]byte, 100000)
+ var prof bytes.Buffer
+ if err := StartCPUProfile(&prof); err != nil {
+ t.Fatal(err)
+ }
+ // This loop takes about a quarter second on a 2 GHz laptop.
+ // We only need to get one 100 Hz clock tick, so we've got
+ // a 25x safety buffer.
+ for i := 0; i < 1000; i++ {
+ crc32.ChecksumIEEE(buf)
+ }
+ StopCPUProfile()
+
+ // Convert []byte to []uintptr.
+ bytes := prof.Bytes()
+ val := *(*[]uintptr)(unsafe.Pointer(&bytes))
+ val = val[:len(bytes)/int(unsafe.Sizeof(uintptr(0)))]
+
+ if len(val) < 10 {
+ t.Fatalf("profile too short: %#x", val)
+ }
+ if val[0] != 0 || val[1] != 3 || val[2] != 0 || val[3] != 1e6/100 || val[4] != 0 {
+ t.Fatalf("unexpected header %#x", val[:5])
+ }
+
+ // Check that profile is well formed and contains ChecksumIEEE.
+ found := false
+ val = val[5:]
+ for len(val) > 0 {
+ if len(val) < 2 || val[0] < 1 || val[1] < 1 || uintptr(len(val)) < 2+val[1] {
+ t.Fatalf("malformed profile. leftover: %#x", val)
+ }
+ for _, pc := range val[2 : 2+val[1]] {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ if strings.Contains(f.Name(), "ChecksumIEEE") ||
+ (strings.Contains(f.Name(), "update") && strings.Contains(f.Name(), "crc32")) {
+ found = true
+ }
+ }
+ val = val[2+val[1]:]
+ }
+
+ if !found {
+ t.Fatal("did not find ChecksumIEEE in the profile")
+ }
+}
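
In other words, the legacy binary CPU profile that the test decodes is a flat sequence of machine words: a five-word header (0, 3, 0, sampling period in microseconds, 0) followed by records of the form (hit count, stack depth, pc1 ... pcN), which is exactly the layout the loop above walks.
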
diff --git a/gcc-4.7/libgo/go/runtime/proc_test.go b/gcc-4.7/libgo/go/runtime/proc_test.go
new file mode 100644
index 000000000..32111080a
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/proc_test.go
@@ -0,0 +1,125 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+var stop = make(chan bool, 1)
+
+func perpetuumMobile() {
+ select {
+ case <-stop:
+ default:
+ go perpetuumMobile()
+ }
+}
+
+func TestStopTheWorldDeadlock(t *testing.T) {
+ if testing.Short() {
+ t.Logf("skipping during short test")
+ return
+ }
+ maxprocs := runtime.GOMAXPROCS(3)
+ compl := make(chan bool, 2)
+ go func() {
+		for i := 0; i != 1000; i++ {
+ runtime.GC()
+ }
+ compl <- true
+ }()
+ go func() {
+		for i := 0; i != 1000; i++ {
+ runtime.GOMAXPROCS(3)
+ }
+ compl <- true
+ }()
+ go perpetuumMobile()
+ <-compl
+ <-compl
+ stop <- true
+ runtime.GOMAXPROCS(maxprocs)
+}
+
+func stackGrowthRecursive(i int) {
+ var pad [128]uint64
+ if i != 0 && pad[0] == 0 {
+ stackGrowthRecursive(i - 1)
+ }
+}
+
+func BenchmarkStackGrowth(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ stackGrowthRecursive(10)
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSyscall(b *testing.B) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Entersyscall()
+ runtime.Exitsyscall()
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
+
+func BenchmarkSyscallWork(b *testing.B) {
+ const CallsPerSched = 1000
+ const LocalWork = 100
+ procs := runtime.GOMAXPROCS(-1)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 42
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Entersyscall()
+ for i := 0; i < LocalWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ runtime.Exitsyscall()
+ }
+ }
+ c <- foo == 42
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/runtime_test.go b/gcc-4.7/libgo/go/runtime/runtime_test.go
new file mode 100644
index 000000000..d68b363e9
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/runtime_test.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "io"
+ "testing"
+)
+
+var errf error
+
+func errfn() error {
+ return errf
+}
+
+func errfn1() error {
+ return io.EOF
+}
+
+func BenchmarkIfaceCmp100(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < 100; j++ {
+ if errfn() == io.EOF {
+ b.Fatal("bad comparison")
+ }
+ }
+ }
+}
+
+func BenchmarkIfaceCmpNil100(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < 100; j++ {
+ if errfn1() == nil {
+ b.Fatal("bad comparison")
+ }
+ }
+ }
+}
diff --git a/gcc-4.7/libgo/go/runtime/softfloat64.go b/gcc-4.7/libgo/go/runtime/softfloat64.go
new file mode 100644
index 000000000..4fcf8f269
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/softfloat64.go
@@ -0,0 +1,498 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software IEEE754 64-bit floating point.
+// Only referred to (and thus linked in) by arm port
+// and by tests in this directory.
+
+package runtime
+
+const (
+ mantbits64 uint = 52
+ expbits64 uint = 11
+ bias64 = -1<<(expbits64-1) + 1
+
+ nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
+ inf64 uint64 = (1<<expbits64 - 1) << mantbits64
+ neg64 uint64 = 1 << (expbits64 + mantbits64)
+
+ mantbits32 uint = 23
+ expbits32 uint = 8
+ bias32 = -1<<(expbits32-1) + 1
+
+ nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
+ inf32 uint32 = (1<<expbits32 - 1) << mantbits32
+ neg32 uint32 = 1 << (expbits32 + mantbits32)
+)
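
The constants above are just the IEEE 754 field layout spelled out; the standard math package's bit conversions reproduce the same encodings. A standalone sketch (the local constants restate the values above):

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		mantbits64 = 52
		expbits64  = 11
	)
	inf64 := uint64(1<<expbits64-1) << mantbits64  // exponent all ones, mantissa zero
	neg64 := uint64(1) << (expbits64 + mantbits64) // sign bit

	fmt.Println(math.Float64bits(math.Inf(+1)) == inf64)       // true
	fmt.Println(math.Float64bits(math.Inf(-1)) == inf64|neg64) // true
	fmt.Printf("%#x\n", math.Float64bits(1.0))                 // 0x3ff0000000000000: exponent field is 1023 = -bias64
}
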
+
+func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
+ sign = f & (1 << (mantbits64 + expbits64))
+ mant = f & (1<<mantbits64 - 1)
+ exp = int(f>>mantbits64) & (1<<expbits64 - 1)
+
+ switch exp {
+ case 1<<expbits64 - 1:
+ if mant != 0 {
+ nan = true
+ return
+ }
+ inf = true
+ return
+
+ case 0:
+ // denormalized
+ if mant != 0 {
+ exp += bias64 + 1
+ for mant < 1<<mantbits64 {
+ mant <<= 1
+ exp--
+ }
+ }
+
+ default:
+ // add implicit top bit
+ mant |= 1 << mantbits64
+ exp += bias64
+ }
+ return
+}
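
For a normal (non-zero, non-special) input, funpack64 simply splits the word into sign, mantissa with the implicit top bit restored, and unbiased exponent. A standalone sketch walking the same fields for 1.5:

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		mantbits64 = 52
		expbits64  = 11
		bias64     = -1<<(expbits64-1) + 1 // -1023
	)
	f := math.Float64bits(1.5) // 0x3ff8000000000000

	sign := f & (1 << (mantbits64 + expbits64))
	mant := f&(1<<mantbits64-1) | 1<<mantbits64 // normal case: restore the implicit top bit
	exp := int(f>>mantbits64)&(1<<expbits64-1) + bias64

	// 1.5 == mant * 2^(exp-52) == 6755399441055744 * 2^-52
	fmt.Println(sign, mant, exp) // 0 6755399441055744 0
}
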
+
+func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
+ sign = f & (1 << (mantbits32 + expbits32))
+ mant = f & (1<<mantbits32 - 1)
+ exp = int(f>>mantbits32) & (1<<expbits32 - 1)
+
+ switch exp {
+ case 1<<expbits32 - 1:
+ if mant != 0 {
+ nan = true
+ return
+ }
+ inf = true
+ return
+
+ case 0:
+ // denormalized
+ if mant != 0 {
+ exp += bias32 + 1
+ for mant < 1<<mantbits32 {
+ mant <<= 1
+ exp--
+ }
+ }
+
+ default:
+ // add implicit top bit
+ mant |= 1 << mantbits32
+ exp += bias32
+ }
+ return
+}
+
+func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
+ mant0, exp0, trunc0 := mant, exp, trunc
+ if mant == 0 {
+ return sign
+ }
+ for mant < 1<<mantbits64 {
+ mant <<= 1
+ exp--
+ }
+ for mant >= 4<<mantbits64 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant >= 2<<mantbits64 {
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ if mant >= 4<<mantbits64 {
+ mant >>= 1
+ exp++
+ }
+ }
+ mant >>= 1
+ exp++
+ }
+ if exp >= 1<<expbits64-1+bias64 {
+ return sign ^ inf64
+ }
+ if exp < bias64+1 {
+ if exp < bias64-int(mantbits64) {
+ return sign | 0
+ }
+ // repeat expecting denormal
+ mant, exp, trunc = mant0, exp0, trunc0
+ for exp < bias64 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ }
+ mant >>= 1
+ exp++
+ if mant < 1<<mantbits64 {
+ return sign | mant
+ }
+ }
+ return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
+}
+
+func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
+ mant0, exp0, trunc0 := mant, exp, trunc
+ if mant == 0 {
+ return sign
+ }
+ for mant < 1<<mantbits32 {
+ mant <<= 1
+ exp--
+ }
+ for mant >= 4<<mantbits32 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant >= 2<<mantbits32 {
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ if mant >= 4<<mantbits32 {
+ mant >>= 1
+ exp++
+ }
+ }
+ mant >>= 1
+ exp++
+ }
+ if exp >= 1<<expbits32-1+bias32 {
+ return sign ^ inf32
+ }
+ if exp < bias32+1 {
+ if exp < bias32-int(mantbits32) {
+ return sign | 0
+ }
+ // repeat expecting denormal
+ mant, exp, trunc = mant0, exp0, trunc0
+ for exp < bias32 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ }
+ mant >>= 1
+ exp++
+ if mant < 1<<mantbits32 {
+ return sign | mant
+ }
+ }
+ return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
+}
+
+func fadd64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN + x or x + NaN = NaN
+ return nan64
+
+ case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
+ return nan64
+
+ case fi: // ±Inf + g = ±Inf
+ return f
+
+ case gi: // f + ±Inf = ±Inf
+ return g
+
+ case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
+ return f
+
+ case fm == 0: // 0 + g = g but 0 + -0 = +0
+ if gm == 0 {
+ g ^= gs
+ }
+ return g
+
+ case gm == 0: // f + 0 = f
+ return f
+
+ }
+
+ if fe < ge || fe == ge && fm < gm {
+ f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
+ }
+
+ shift := uint(fe - ge)
+ fm <<= 2
+ gm <<= 2
+ trunc := gm & (1<<shift - 1)
+ gm >>= shift
+ if fs == gs {
+ fm += gm
+ } else {
+ fm -= gm
+ if trunc != 0 {
+ fm--
+ }
+ }
+ if fm == 0 {
+ fs = 0
+ }
+ return fpack64(fs, fm, fe-2, trunc)
+}
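
fadd64 aligns the smaller operand onto the larger one, keeping two extra low-order bits (the <<2) plus the sticky trunc word so that fpack64 can round the result to nearest-even. For example, 1 + 2^-53 falls exactly halfway between 1 and the next representable value 1 + 2^-52 and rounds back to 1 (ties go to the even mantissa), while 1 + (2^-53 + 2^-105) is just above the halfway point and rounds up to 1 + 2^-52.
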
+
+func fsub64(f, g uint64) uint64 {
+ return fadd64(f, fneg64(g))
+}
+
+func fneg64(f uint64) uint64 {
+ return f ^ (1 << (mantbits64 + expbits64))
+}
+
+func fmul64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN * g or f * NaN = NaN
+ return nan64
+
+ case fi && gi: // Inf * Inf = Inf (with sign adjusted)
+ return f ^ gs
+
+ case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
+ return nan64
+
+ case fm == 0: // 0 * x = 0 (with sign adjusted)
+ return f ^ gs
+
+ case gm == 0: // x * 0 = 0 (with sign adjusted)
+ return g ^ fs
+ }
+
+ // 53-bit * 53-bit = 107- or 108-bit
+ lo, hi := mullu(fm, gm)
+ shift := mantbits64 - 1
+ trunc := lo & (1<<shift - 1)
+ mant := hi<<(64-shift) | lo>>shift
+ return fpack64(fs^gs, mant, fe+ge-1, trunc)
+}
+
+func fdiv64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN / g = f / NaN = NaN
+ return nan64
+
+ case fi && gi: // ±Inf / ±Inf = NaN
+ return nan64
+
+ case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
+ return nan64
+
+ case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
+ return fs ^ gs ^ inf64
+
+	case gi, fm == 0: // f / Inf = 0 / g = 0
+ return fs ^ gs ^ 0
+ }
+ _, _, _, _ = fi, fn, gi, gn
+
+ // 53-bit<<54 / 53-bit = 53- or 54-bit.
+ shift := mantbits64 + 2
+ q, r := divlu(fm>>(64-shift), fm<<shift, gm)
+ return fpack64(fs^gs, q, fe-ge-2, r)
+}
+
+func f64to32(f uint64) uint32 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ if fn {
+ return nan32
+ }
+ fs32 := uint32(fs >> 32)
+ if fi {
+ return fs32 ^ inf32
+ }
+ const d = mantbits64 - mantbits32 - 1
+ return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
+}
+
+func f32to64(f uint32) uint64 {
+ const d = mantbits64 - mantbits32
+ fs, fm, fe, fi, fn := funpack32(f)
+ if fn {
+ return nan64
+ }
+ fs64 := uint64(fs) << 32
+ if fi {
+ return fs64 ^ inf64
+ }
+ return fpack64(fs64, uint64(fm)<<d, fe, 0)
+}
+
+func fcmp64(f, g uint64) (cmp int, isnan bool) {
+ fs, fm, _, fi, fn := funpack64(f)
+ gs, gm, _, gi, gn := funpack64(g)
+
+ switch {
+ case fn, gn: // flag NaN
+ return 0, true
+
+ case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
+ return 0, false
+
+ case fs > gs: // f < 0, g > 0
+ return -1, false
+
+ case fs < gs: // f > 0, g < 0
+ return +1, false
+
+ // Same sign, not NaN.
+ // Can compare encodings directly now.
+ // Reverse for sign.
+ case fs == 0 && f < g, fs != 0 && f > g:
+ return -1, false
+
+ case fs == 0 && f > g, fs != 0 && f < g:
+ return +1, false
+ }
+
+ // f == g
+ return 0, false
+}
+
+func f64toint(f uint64) (val int64, ok bool) {
+ fs, fm, fe, fi, fn := funpack64(f)
+
+ switch {
+ case fi, fn: // NaN
+ return 0, false
+
+ case fe < -1: // f < 0.5
+ return 0, false
+
+ case fe > 63: // f >= 2^63
+ if fs != 0 && fm == 0 { // f == -2^63
+ return -1 << 63, true
+ }
+ if fs != 0 {
+ return 0, false
+ }
+ return 0, false
+ }
+
+ for fe > int(mantbits64) {
+ fe--
+ fm <<= 1
+ }
+ for fe < int(mantbits64) {
+ fe++
+ fm >>= 1
+ }
+ val = int64(fm)
+ if fs != 0 {
+ val = -val
+ }
+ return val, true
+}
+
+func fintto64(val int64) (f uint64) {
+ fs := uint64(val) & (1 << 63)
+ mant := uint64(val)
+ if fs != 0 {
+ mant = -mant
+ }
+ return fpack64(fs, mant, int(mantbits64), 0)
+}
+
+// 64x64 -> 128 multiply.
+// Adapted from Hacker's Delight.
+func mullu(u, v uint64) (lo, hi uint64) {
+ const (
+ s = 32
+ mask = 1<<s - 1
+ )
+ u0 := u & mask
+ u1 := u >> s
+ v0 := v & mask
+ v1 := v >> s
+ w0 := u0 * v0
+ t := u1*v0 + w0>>s
+ w1 := t & mask
+ w2 := t >> s
+ w1 += u0 * v1
+ return u * v, u1*v1 + w2 + w1>>s
+}
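
A quick cross-check of mullu against math/big, with the routine reproduced verbatim so the sketch is self-contained (the inputs are arbitrary):

package main

import (
	"fmt"
	"math/big"
)

// mullu reproduces the 64x64 -> 128 multiply above.
func mullu(u, v uint64) (lo, hi uint64) {
	const (
		s    = 32
		mask = 1<<s - 1
	)
	u0 := u & mask
	u1 := u >> s
	v0 := v & mask
	v1 := v >> s
	w0 := u0 * v0
	t := u1*v0 + w0>>s
	w1 := t & mask
	w2 := t >> s
	w1 += u0 * v1
	return u * v, u1*v1 + w2 + w1>>s
}

func main() {
	u, v := uint64(0xfedcba9876543210), uint64(0x0123456789abcdef)
	lo, hi := mullu(u, v)

	want := new(big.Int).Mul(new(big.Int).SetUint64(u), new(big.Int).SetUint64(v))
	got := new(big.Int).Lsh(new(big.Int).SetUint64(hi), 64)
	got.Add(got, new(big.Int).SetUint64(lo))

	fmt.Println(got.Cmp(want) == 0) // true
}
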
+
+// 128/64 -> 64 quotient, 64 remainder.
+// Adapted from Hacker's Delight.
+func divlu(u1, u0, v uint64) (q, r uint64) {
+ const b = 1 << 32
+
+ if u1 >= v {
+ return 1<<64 - 1, 1<<64 - 1
+ }
+
+ // s = nlz(v); v <<= s
+ s := uint(0)
+ for v&(1<<63) == 0 {
+ s++
+ v <<= 1
+ }
+
+ vn1 := v >> 32
+ vn0 := v & (1<<32 - 1)
+ un32 := u1<<s | u0>>(64-s)
+ un10 := u0 << s
+ un1 := un10 >> 32
+ un0 := un10 & (1<<32 - 1)
+ q1 := un32 / vn1
+ rhat := un32 - q1*vn1
+
+again1:
+ if q1 >= b || q1*vn0 > b*rhat+un1 {
+ q1--
+ rhat += vn1
+ if rhat < b {
+ goto again1
+ }
+ }
+
+ un21 := un32*b + un1 - q1*v
+ q0 := un21 / vn1
+ rhat = un21 - q0*vn1
+
+again2:
+ if q0 >= b || q0*vn0 > b*rhat+un0 {
+ q0--
+ rhat += vn1
+ if rhat < b {
+ goto again2
+ }
+ }
+
+ return q1*b + q0, (un21*b + un0 - q0*v) >> s
+}
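
The guard at the top handles quotient overflow: when u1 >= v the true quotient would not fit in 64 bits, so both results saturate to 1<<64 - 1. As a small worked example of the normal path, dividing 2^64 (u1 = 1, u0 = 0) by v = 3 satisfies the precondition and should yield q = floor(2^64/3) = 0x5555555555555555 and r = 1, i.e. divlu(1, 0, 3) = (0x5555555555555555, 1).
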
+
+// callable from C
+
+func fadd64c(f, g uint64, ret *uint64) { *ret = fadd64(f, g) }
+func fsub64c(f, g uint64, ret *uint64) { *ret = fsub64(f, g) }
+func fmul64c(f, g uint64, ret *uint64) { *ret = fmul64(f, g) }
+func fdiv64c(f, g uint64, ret *uint64) { *ret = fdiv64(f, g) }
+func fneg64c(f uint64, ret *uint64) { *ret = fneg64(f) }
+func f32to64c(f uint32, ret *uint64) { *ret = f32to64(f) }
+func f64to32c(f uint64, ret *uint32) { *ret = f64to32(f) }
+func fcmp64c(f, g uint64, ret *int, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
+func fintto64c(val int64, ret *uint64) { *ret = fintto64(val) }
+func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
diff --git a/gcc-4.7/libgo/go/runtime/softfloat64_test.go b/gcc-4.7/libgo/go/runtime/softfloat64_test.go
new file mode 100644
index 000000000..df63010fb
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/softfloat64_test.go
@@ -0,0 +1,198 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "math"
+ "math/rand"
+ . "runtime"
+ "testing"
+)
+
+// turn uint64 op into float64 op
+func fop(f func(x, y uint64) uint64) func(x, y float64) float64 {
+ return func(x, y float64) float64 {
+ bx := math.Float64bits(x)
+ by := math.Float64bits(y)
+ return math.Float64frombits(f(bx, by))
+ }
+}
+
+func add(x, y float64) float64 { return x + y }
+func sub(x, y float64) float64 { return x - y }
+func mul(x, y float64) float64 { return x * y }
+func div(x, y float64) float64 { return x / y }
+
+func TestFloat64(t *testing.T) {
+ base := []float64{
+ 0,
+ math.Copysign(0, -1),
+ -1,
+ 1,
+ math.NaN(),
+ math.Inf(+1),
+ math.Inf(-1),
+ 0.1,
+ 1.5,
+ 1.9999999999999998, // all 1s mantissa
+ 1.3333333333333333, // 1.010101010101...
+ 1.1428571428571428, // 1.001001001001...
+ 1.112536929253601e-308, // first normal
+ 2,
+ 4,
+ 8,
+ 16,
+ 32,
+ 64,
+ 128,
+ 256,
+ 3,
+ 12,
+ 1234,
+ 123456,
+ -0.1,
+ -1.5,
+ -1.9999999999999998,
+ -1.3333333333333333,
+ -1.1428571428571428,
+ -2,
+ -3,
+ 1e-200,
+ 1e-300,
+ 1e-310,
+ 5e-324,
+ 1e-105,
+ 1e-305,
+ 1e+200,
+ 1e+306,
+ 1e+307,
+ 1e+308,
+ }
+ all := make([]float64, 200)
+ copy(all, base)
+ for i := len(base); i < len(all); i++ {
+ all[i] = rand.NormFloat64()
+ }
+
+ test(t, "+", add, fop(Fadd64), all)
+ test(t, "-", sub, fop(Fsub64), all)
+ if GOARCH != "386" { // 386 is not precise!
+ test(t, "*", mul, fop(Fmul64), all)
+ test(t, "/", div, fop(Fdiv64), all)
+ }
+}
+
+// 64 -hw-> 32 -hw-> 64
+func trunc32(f float64) float64 {
+ return float64(float32(f))
+}
+
+// 64 -sw-> 32 -hw-> 64
+func to32sw(f float64) float64 {
+ return float64(math.Float32frombits(F64to32(math.Float64bits(f))))
+}
+
+// 64 -hw-> 32 -sw-> 64
+func to64sw(f float64) float64 {
+ return math.Float64frombits(F32to64(math.Float32bits(float32(f))))
+}
+
+// float64 -hw-> int64 -hw-> float64
+func hwint64(f float64) float64 {
+ return float64(int64(f))
+}
+
+// float64 -hw-> int32 -hw-> float64
+func hwint32(f float64) float64 {
+ return float64(int32(f))
+}
+
+// float64 -sw-> int64 -hw-> float64
+func toint64sw(f float64) float64 {
+ i, ok := F64toint(math.Float64bits(f))
+ if !ok {
+ // There's no right answer for out of range.
+ // Match the hardware to pass the test.
+ i = int64(f)
+ }
+ return float64(i)
+}
+
+// float64 -hw-> int64 -sw-> float64
+func fromint64sw(f float64) float64 {
+ return math.Float64frombits(Fintto64(int64(f)))
+}
+
+var nerr int
+
+func err(t *testing.T, format string, args ...interface{}) {
+ t.Errorf(format, args...)
+
+ // cut errors off after a while.
+ // otherwise we spend all our time
+ // allocating memory to hold the
+ // formatted output.
+ if nerr++; nerr >= 10 {
+ t.Fatal("too many errors")
+ }
+}
+
+func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all []float64) {
+ for _, f := range all {
+ for _, g := range all {
+ h := hw(f, g)
+ s := sw(f, g)
+ if !same(h, s) {
+ err(t, "%g %s %g = sw %g, hw %g\n", f, op, g, s, h)
+ }
+ testu(t, "to32", trunc32, to32sw, h)
+ testu(t, "to64", trunc32, to64sw, h)
+ testu(t, "toint64", hwint64, toint64sw, h)
+ testu(t, "fromint64", hwint64, fromint64sw, h)
+ testcmp(t, f, h)
+ testcmp(t, h, f)
+ testcmp(t, g, h)
+ testcmp(t, h, g)
+ }
+ }
+}
+
+func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) {
+ h := hw(v)
+ s := sw(v)
+ if !same(h, s) {
+ err(t, "%s %g = sw %g, hw %g\n", op, v, s, h)
+ }
+}
+
+func hwcmp(f, g float64) (cmp int, isnan bool) {
+ switch {
+ case f < g:
+ return -1, false
+ case f > g:
+ return +1, false
+ case f == g:
+ return 0, false
+ }
+ return 0, true // must be NaN
+}
+
+func testcmp(t *testing.T, f, g float64) {
+ hcmp, hisnan := hwcmp(f, g)
+ scmp, sisnan := Fcmp64(math.Float64bits(f), math.Float64bits(g))
+ if hcmp != scmp || hisnan != sisnan {
+ err(t, "cmp(%g, %g) = sw %v, %v, hw %v, %v\n", f, g, scmp, sisnan, hcmp, hisnan)
+ }
+}
+
+func same(f, g float64) bool {
+ if math.IsNaN(f) && math.IsNaN(g) {
+ return true
+ }
+ if math.Copysign(1, f) != math.Copysign(1, g) {
+ return false
+ }
+ return f == g
+}
diff --git a/gcc-4.7/libgo/go/runtime/symtab_test.go b/gcc-4.7/libgo/go/runtime/symtab_test.go
new file mode 100644
index 000000000..0db63c347
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/symtab_test.go
@@ -0,0 +1,55 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var _ = runtime.Caller
+var _ = strings.HasSuffix
+type _ testing.T
+
+/* runtime.Caller is not fully implemented for gccgo.
+
+func TestCaller(t *testing.T) {
+ procs := runtime.GOMAXPROCS(-1)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for i := 0; i < 1000; i++ {
+ testCallerFoo(t)
+ }
+ c <- true
+ }()
+ defer func() {
+ <-c
+ }()
+ }
+}
+
+func testCallerFoo(t *testing.T) {
+ testCallerBar(t)
+}
+
+func testCallerBar(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ pc, file, line, ok := runtime.Caller(i)
+ f := runtime.FuncForPC(pc)
+ if !ok ||
+ !strings.HasSuffix(file, "symtab_test.go") ||
+ (i == 0 && !strings.HasSuffix(f.Name(), "testCallerBar")) ||
+ (i == 1 && !strings.HasSuffix(f.Name(), "testCallerFoo")) ||
+ line < 5 || line > 1000 ||
+ f.Entry() >= pc {
+ t.Errorf("incorrect symbol info %d: %t %d %d %s %s %d",
+ i, ok, f.Entry(), pc, f.Name(), file, line)
+ }
+ }
+}
+
+*/
diff --git a/gcc-4.7/libgo/go/runtime/type.go b/gcc-4.7/libgo/go/runtime/type.go
new file mode 100644
index 000000000..c15c1c10c
--- /dev/null
+++ b/gcc-4.7/libgo/go/runtime/type.go
@@ -0,0 +1,55 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Runtime type representation.
+ * This file exists only to provide types that 6l can turn into
+ * DWARF information for use by gdb. Nothing else uses these.
+ * They should match the same types in ../reflect/type.go.
+ * For comments see ../reflect/type.go.
+ */
+
+package runtime
+
+import "unsafe"
+
+type commonType struct {
+ Kind uint8
+ align uint8
+ fieldAlign uint8
+ size uintptr
+ hash uint32
+
+ hashfn func(unsafe.Pointer, uintptr) uintptr
+ equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
+
+ string *string
+ *uncommonType
+ ptrToThis *commonType
+}
+
+type _method struct {
+ name *string
+ pkgPath *string
+ mtyp *commonType
+ typ *commonType
+ tfn unsafe.Pointer
+}
+
+type uncommonType struct {
+ name *string
+ pkgPath *string
+ methods []_method
+}
+
+type _imethod struct {
+ name *string
+ pkgPath *string
+ typ *commonType
+}
+
+type interfaceType struct {
+ commonType
+ methods []_imethod
+}