Diffstat (limited to 'gcc-4.8/libgo/go/sync')
-rw-r--r--  gcc-4.8/libgo/go/sync/atomic/atomic_test.go   32
-rw-r--r--  gcc-4.8/libgo/go/sync/atomic/race.go           15
-rw-r--r--  gcc-4.8/libgo/go/sync/cond.go                   3
-rw-r--r--  gcc-4.8/libgo/go/sync/example_test.go           6
-rw-r--r--  gcc-4.8/libgo/go/sync/mutex.go                  1
-rw-r--r--  gcc-4.8/libgo/go/sync/rwmutex.go                4
-rw-r--r--  gcc-4.8/libgo/go/sync/waitgroup.go              9
7 files changed, 64 insertions, 6 deletions
diff --git a/gcc-4.8/libgo/go/sync/atomic/atomic_test.go b/gcc-4.8/libgo/go/sync/atomic/atomic_test.go
index 25be63b5a..c6c33dc3c 100644
--- a/gcc-4.8/libgo/go/sync/atomic/atomic_test.go
+++ b/gcc-4.8/libgo/go/sync/atomic/atomic_test.go
@@ -1119,7 +1119,7 @@ func TestStoreLoadRelAcq32(t *testing.T) {
d1 := X.data1
d2 := X.data2
if d1 != i || d2 != float32(i) {
- t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
}
}
}
@@ -1167,7 +1167,7 @@ func TestStoreLoadRelAcq64(t *testing.T) {
d1 := X.data1
d2 := X.data2
if d1 != i || d2 != float64(i) {
- t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
}
}
}
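
Both hunks above fix the same thing: the failure message passed a float to fmt's %d verb, which does not format floats. A tiny, hypothetical illustration (not part of the patch) of what the old and new verbs print:

package main

import "fmt"

func main() {
	// With %d the float argument is rejected and the message is garbled;
	// with %g it is printed as a number.
	fmt.Printf("incorrect data: %d/%d (%d)\n", 1, float32(2), 3) // prints: incorrect data: 1/%!d(float32=2) (3)
	fmt.Printf("incorrect data: %d/%g (%d)\n", 1, float32(2), 3) // prints: incorrect data: 1/2 (3)
}
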
@@ -1177,3 +1177,31 @@ func TestStoreLoadRelAcq64(t *testing.T) {
<-c
<-c
}
+
+func shouldPanic(t *testing.T, name string, f func()) {
+ defer func() {
+ if recover() == nil {
+ t.Errorf("%s did not panic", name)
+ }
+ }()
+ f()
+}
+
+func TestUnaligned64(t *testing.T) {
+ // Unaligned 64-bit atomics on 32-bit systems are
+ // a continual source of pain. Test that on 32-bit systems they crash
+ // instead of failing silently.
+ if unsafe.Sizeof(int(0)) != 4 {
+ t.Skip("test only runs on 32-bit systems")
+ }
+
+ t.Skip("skipping test for gccgo")
+
+ x := make([]uint32, 4)
+ p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
+
+ shouldPanic(t, "LoadUint64", func() { LoadUint64(p) })
+ shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) })
+ shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) })
+ shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) })
+}
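
For context on the new test: on 32-bit systems the 64-bit functions in sync/atomic require their operands to be 8-byte aligned, and the test above deliberately takes a *uint64 into the middle of a []uint32 to check that such misuse panics rather than failing silently (the check is skipped for gccgo). A minimal, hypothetical sketch of how callers normally stay on the right side of that rule, relying on the documented guarantee that the first word of a variable or allocated struct is 64-bit aligned:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters puts its 64-bit field first so that &c.hits is 8-byte aligned
// even on 32-bit platforms.
type counters struct {
	hits  uint64
	flags uint32
}

func main() {
	var c counters
	atomic.AddUint64(&c.hits, 1)
	fmt.Println(atomic.LoadUint64(&c.hits))
}
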
diff --git a/gcc-4.8/libgo/go/sync/atomic/race.go b/gcc-4.8/libgo/go/sync/atomic/race.go
index 242bbf298..2320b5707 100644
--- a/gcc-4.8/libgo/go/sync/atomic/race.go
+++ b/gcc-4.8/libgo/go/sync/atomic/race.go
@@ -25,6 +25,7 @@ func CompareAndSwapInt32(val *int32, old, new int32) bool {
}
func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -43,6 +44,7 @@ func CompareAndSwapInt64(val *int64, old, new int64) bool {
}
func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -57,6 +59,7 @@ func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) {
}
func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -71,6 +74,7 @@ func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swappe
}
func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -89,6 +93,7 @@ func AddInt32(val *int32, delta int32) int32 {
}
func AddUint32(val *uint32, delta uint32) (new uint32) {
+ _ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
@@ -105,6 +110,7 @@ func AddInt64(val *int64, delta int64) int64 {
}
func AddUint64(val *uint64, delta uint64) (new uint64) {
+ _ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
@@ -117,6 +123,7 @@ func AddUint64(val *uint64, delta uint64) (new uint64) {
}
func AddUintptr(val *uintptr, delta uintptr) (new uintptr) {
+ _ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
@@ -133,6 +140,7 @@ func LoadInt32(addr *int32) int32 {
}
func LoadUint32(addr *uint32) (val uint32) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -146,6 +154,7 @@ func LoadInt64(addr *int64) int64 {
}
func LoadUint64(addr *uint64) (val uint64) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -155,6 +164,7 @@ func LoadUint64(addr *uint64) (val uint64) {
}
func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -164,6 +174,7 @@ func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) {
}
func LoadUintptr(addr *uintptr) (val uintptr) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -177,6 +188,7 @@ func StoreInt32(addr *int32, val int32) {
}
func StoreUint32(addr *uint32, val uint32) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
@@ -189,6 +201,7 @@ func StoreInt64(addr *int64, val int64) {
}
func StoreUint64(addr *uint64, val uint64) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
@@ -197,6 +210,7 @@ func StoreUint64(addr *uint64, val uint64) {
}
func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
@@ -205,6 +219,7 @@ func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) {
}
func StoreUintptr(addr *uintptr, val uintptr) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
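
A note on the pattern running through this file: each exported function gains a bare `_ = *val` or `_ = *addr` before it touches the race runtime. My reading is that this forces a nil pointer to fault immediately, before runtime.RaceSemacquire takes the package's global mtx, so a panicking caller cannot leave that semaphore held; the `_ = m.state`-style reads added to cond.go, mutex.go, rwmutex.go and waitgroup.go below look like the same idea applied to a nil receiver before the race-runtime calls. A small, hypothetical sketch of the idiom outside this package:

package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

// deposit dereferences balance before locking, mirroring the race.go change:
// a nil pointer panics here, not while mu is held.
func deposit(balance *int64, amount int64) {
	_ = *balance
	mu.Lock()
	*balance += amount
	mu.Unlock()
}

func main() {
	var b int64
	deposit(&b, 42)
	fmt.Println(b)
}
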
diff --git a/gcc-4.8/libgo/go/sync/cond.go b/gcc-4.8/libgo/go/sync/cond.go
index 491b98569..13547a8a1 100644
--- a/gcc-4.8/libgo/go/sync/cond.go
+++ b/gcc-4.8/libgo/go/sync/cond.go
@@ -57,6 +57,7 @@ func NewCond(l Locker) *Cond {
//
func (c *Cond) Wait() {
if raceenabled {
+ _ = c.m.state
raceDisable()
}
c.m.Lock()
@@ -80,6 +81,7 @@ func (c *Cond) Wait() {
// during the call.
func (c *Cond) Signal() {
if raceenabled {
+ _ = c.m.state
raceDisable()
}
c.m.Lock()
@@ -106,6 +108,7 @@ func (c *Cond) Signal() {
// during the call.
func (c *Cond) Broadcast() {
if raceenabled {
+ _ = c.m.state
raceDisable()
}
c.m.Lock()
diff --git a/gcc-4.8/libgo/go/sync/example_test.go b/gcc-4.8/libgo/go/sync/example_test.go
index 156492400..031c87f03 100644
--- a/gcc-4.8/libgo/go/sync/example_test.go
+++ b/gcc-4.8/libgo/go/sync/example_test.go
@@ -24,10 +24,10 @@ func ExampleWaitGroup() {
wg.Add(1)
// Launch a goroutine to fetch the URL.
go func(url string) {
+ // Decrement the counter when the goroutine completes.
+ defer wg.Done()
// Fetch the URL.
http.Get(url)
- // Decrement the counter.
- wg.Done()
}(url)
}
// Wait for all HTTP fetches to complete.
@@ -37,7 +37,7 @@ func ExampleWaitGroup() {
func ExampleOnce() {
var once sync.Once
onceBody := func() {
- fmt.Printf("Only once\n")
+ fmt.Println("Only once")
}
done := make(chan bool)
for i := 0; i < 10; i++ {
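
The ExampleWaitGroup change above moves the Done call into a defer at the top of the goroutine, so the counter is decremented even if the body panics or returns early. A hypothetical sketch (not from the patch) of the failure mode the defer avoids:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, n := range []int{1, -1, 2} {
		wg.Add(1)
		go func(n int) {
			defer wg.Done() // still runs on the early return below
			if n < 0 {
				// With a trailing wg.Done() instead of the defer, this path
				// would leak a counter and Wait would block forever.
				return
			}
			fmt.Println("processed", n)
		}(n)
	}
	wg.Wait()
}
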
diff --git a/gcc-4.8/libgo/go/sync/mutex.go b/gcc-4.8/libgo/go/sync/mutex.go
index b4629ebca..73b337702 100644
--- a/gcc-4.8/libgo/go/sync/mutex.go
+++ b/gcc-4.8/libgo/go/sync/mutex.go
@@ -81,6 +81,7 @@ func (m *Mutex) Lock() {
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
if raceenabled {
+ _ = m.state
raceRelease(unsafe.Pointer(m))
}
diff --git a/gcc-4.8/libgo/go/sync/rwmutex.go b/gcc-4.8/libgo/go/sync/rwmutex.go
index b494c6435..3db541995 100644
--- a/gcc-4.8/libgo/go/sync/rwmutex.go
+++ b/gcc-4.8/libgo/go/sync/rwmutex.go
@@ -28,6 +28,7 @@ const rwmutexMaxReaders = 1 << 30
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
if raceenabled {
+ _ = rw.w.state
raceDisable()
}
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
@@ -46,6 +47,7 @@ func (rw *RWMutex) RLock() {
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
if raceenabled {
+ _ = rw.w.state
raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
raceDisable()
}
@@ -69,6 +71,7 @@ func (rw *RWMutex) RUnlock() {
// the lock.
func (rw *RWMutex) Lock() {
if raceenabled {
+ _ = rw.w.state
raceDisable()
}
// First, resolve competition with other writers.
@@ -94,6 +97,7 @@ func (rw *RWMutex) Lock() {
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
if raceenabled {
+ _ = rw.w.state
raceRelease(unsafe.Pointer(&rw.readerSem))
raceRelease(unsafe.Pointer(&rw.writerSem))
raceDisable()
diff --git a/gcc-4.8/libgo/go/sync/waitgroup.go b/gcc-4.8/libgo/go/sync/waitgroup.go
index 9b0ffec58..ca3883783 100644
--- a/gcc-4.8/libgo/go/sync/waitgroup.go
+++ b/gcc-4.8/libgo/go/sync/waitgroup.go
@@ -34,10 +34,16 @@ type WaitGroup struct {
// G3: Wait() // G1 still hasn't run, G3 finds sema == 1, unblocked! Bug.
// Add adds delta, which may be negative, to the WaitGroup counter.
-// If the counter becomes zero, all goroutines blocked on Wait() are released.
+// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
+//
+// Note that calls with positive delta must happen before the call to Wait,
+// or else Wait may wait for too small a group. Typically this means the calls
+// to Add should execute before the statement creating the goroutine or
+// other event to be waited for. See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
if raceenabled {
+ _ = wg.m.state
raceReleaseMerge(unsafe.Pointer(wg))
raceDisable()
defer raceEnable()
@@ -66,6 +72,7 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
if raceenabled {
+ _ = wg.m.state
raceDisable()
}
if atomic.LoadInt32(&wg.counter) == 0 {
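
The new sentences in Add's documentation are the behavioral note of this hunk: calls with positive delta must happen before Wait can observe the counter, which in practice means calling Add in the launching goroutine, before the go statement. A minimal, hypothetical sketch of that sanctioned ordering (moving the Add inside the spawned goroutine would be exactly the race the comment warns about):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // before the go statement, as the new doc comment requires
		go func(i int) {
			defer wg.Done()
			fmt.Println("worker", i)
		}(i)
	}
	wg.Wait()
}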