sync, sync/atomic: do not corrupt race detector after a nil dereference.
The race detector uses a global lock to analyze atomic
operations. A panic in the middle of an instrumented
operation (e.g. from a nil pointer dereference) leaves the
lock acquired, hanging every subsequent atomic operation.
Similarly, the sync package may leave the race detector
in an inconsistent state when its methods are called on
nil pointers.
The fix is to dereference the pointer up front, before any
race detector state is touched, so that a nil pointer
panics while the detector is still consistent.
R=golang-dev, r, minux.ma, dvyukov, rsc, adg
CC=golang-dev
https://golang.org/cl/7981043
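
The sync/atomic side of the fix is the same one-liner in every
instrumented function: force the dereference before entering the
detector's critical section. A minimal sketch of the pattern follows;
it is not the actual race.go code, and a plain sync.Mutex stands in
for the runtime.RaceSemacquire/RaceSemrelease pair:

	package main

	import "sync"

	// mtx stands in for the race detector's global lock.
	var mtx sync.Mutex

	// addInt32 sketches the fixed pattern: "_ = *val" panics on a
	// nil pointer before mtx is acquired, so a recovered panic
	// cannot leave the lock held and hang later atomic operations.
	func addInt32(val *int32, delta int32) (new int32) {
		_ = *val // a nil pointer panics here, while mtx is still free
		mtx.Lock()
		*val += delta
		new = *val
		mtx.Unlock() // no defer, mirroring race.go: a panic while
		return       // holding mtx would leak it forever
	}

	func main() {
		defer func() {
			if recover() != nil {
				mtx.Lock() // succeeds: the panic fired before mtx was taken
				mtx.Unlock()
			}
		}()
		var nilptr *int32
		addInt32(nilptr, 1)
	}

Without the added dereference, the panic fires inside the critical
section and the next atomic operation blocks forever, which is the
hang the new tests below reproduce.
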
diff --git a/src/pkg/runtime/race/testdata/atomic_test.go b/src/pkg/runtime/race/testdata/atomic_test.go
index 0c5c2c0..fc569b9 100644
--- a/src/pkg/runtime/race/testdata/atomic_test.go
+++ b/src/pkg/runtime/race/testdata/atomic_test.go
@@ -6,6 +6,7 @@
import (
"runtime"
+ "sync"
"sync/atomic"
"testing"
"unsafe"
@@ -269,3 +270,21 @@
a = 42
<-c
}
+
+// A nil pointer in an atomic operation should not deadlock
+// the rest of the program. Used to hang indefinitely.
+func TestNoRaceAtomicCrash(t *testing.T) {
+ var mutex sync.Mutex
+ var nilptr *int32
+ panics := 0
+ defer func() {
+ if x := recover(); x != nil {
+ mutex.Lock()
+ panics++
+ mutex.Unlock()
+ } else {
+ panic("no panic")
+ }
+ }()
+ atomic.AddInt32(nilptr, 1)
+}
diff --git a/src/pkg/runtime/race/testdata/sync_test.go b/src/pkg/runtime/race/testdata/sync_test.go
index e80ba3b..93af0b1 100644
--- a/src/pkg/runtime/race/testdata/sync_test.go
+++ b/src/pkg/runtime/race/testdata/sync_test.go
@@ -195,3 +195,22 @@
x = 2
<-ch
}
+
+// A nil pointer in a mutex method call should not
+// corrupt the race detector state.
+// Used to hang indefinitely.
+func TestNoRaceNilMutexCrash(t *testing.T) {
+ var mutex sync.Mutex
+ panics := 0
+ defer func() {
+ if x := recover(); x != nil {
+ mutex.Lock()
+ panics++
+ mutex.Unlock()
+ } else {
+ panic("no panic")
+ }
+ }()
+ var othermutex *sync.RWMutex
+ othermutex.RLock()
+}
diff --git a/src/pkg/sync/atomic/race.go b/src/pkg/sync/atomic/race.go
index 242bbf2..2320b57 100644
--- a/src/pkg/sync/atomic/race.go
+++ b/src/pkg/sync/atomic/race.go
@@ -25,6 +25,7 @@
}
func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -43,6 +44,7 @@
}
func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -57,6 +59,7 @@
}
func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -71,6 +74,7 @@
}
func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) {
+ _ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
@@ -89,6 +93,7 @@
}
func AddUint32(val *uint32, delta uint32) (new uint32) {
+ _ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
@@ -105,6 +110,7 @@
}
func AddUint64(val *uint64, delta uint64) (new uint64) {
+ _ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
@@ -117,6 +123,7 @@
}
func AddUintptr(val *uintptr, delta uintptr) (new uintptr) {
+ _ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
@@ -133,6 +140,7 @@
}
func LoadUint32(addr *uint32) (val uint32) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -146,6 +154,7 @@
}
func LoadUint64(addr *uint64) (val uint64) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -155,6 +164,7 @@
}
func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -164,6 +174,7 @@
}
func LoadUintptr(addr *uintptr) (val uintptr) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
@@ -177,6 +188,7 @@
}
func StoreUint32(addr *uint32, val uint32) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
@@ -189,6 +201,7 @@
}
func StoreUint64(addr *uint64, val uint64) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
@@ -197,6 +210,7 @@
}
func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
@@ -205,6 +219,7 @@
}
func StoreUintptr(addr *uintptr, val uintptr) {
+ _ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
diff --git a/src/pkg/sync/cond.go b/src/pkg/sync/cond.go
index 491b985..13547a8 100644
--- a/src/pkg/sync/cond.go
+++ b/src/pkg/sync/cond.go
@@ -57,6 +57,7 @@
//
func (c *Cond) Wait() {
if raceenabled {
+ _ = c.m.state
raceDisable()
}
c.m.Lock()
@@ -80,6 +81,7 @@
// during the call.
func (c *Cond) Signal() {
if raceenabled {
+ _ = c.m.state
raceDisable()
}
c.m.Lock()
@@ -106,6 +108,7 @@
// during the call.
func (c *Cond) Broadcast() {
if raceenabled {
+ _ = c.m.state
raceDisable()
}
c.m.Lock()
diff --git a/src/pkg/sync/mutex.go b/src/pkg/sync/mutex.go
index b4629eb..73b3377 100644
--- a/src/pkg/sync/mutex.go
+++ b/src/pkg/sync/mutex.go
@@ -81,6 +81,7 @@
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
if raceenabled {
+ _ = m.state
raceRelease(unsafe.Pointer(m))
}
diff --git a/src/pkg/sync/rwmutex.go b/src/pkg/sync/rwmutex.go
index b494c64..3db5419 100644
--- a/src/pkg/sync/rwmutex.go
+++ b/src/pkg/sync/rwmutex.go
@@ -28,6 +28,7 @@
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
if raceenabled {
+ _ = rw.w.state
raceDisable()
}
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
@@ -46,6 +47,7 @@
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
if raceenabled {
+ _ = rw.w.state
raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
raceDisable()
}
@@ -69,6 +71,7 @@
// the lock.
func (rw *RWMutex) Lock() {
if raceenabled {
+ _ = rw.w.state
raceDisable()
}
// First, resolve competition with other writers.
@@ -94,6 +97,7 @@
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
if raceenabled {
+ _ = rw.w.state
raceRelease(unsafe.Pointer(&rw.readerSem))
raceRelease(unsafe.Pointer(&rw.writerSem))
raceDisable()
diff --git a/src/pkg/sync/waitgroup.go b/src/pkg/sync/waitgroup.go
index 1277f1c..ca38837 100644
--- a/src/pkg/sync/waitgroup.go
+++ b/src/pkg/sync/waitgroup.go
@@ -43,6 +43,7 @@
// other event to be waited for. See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
if raceenabled {
+ _ = wg.m.state
raceReleaseMerge(unsafe.Pointer(wg))
raceDisable()
defer raceEnable()
@@ -71,6 +72,7 @@
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
if raceenabled {
+ _ = wg.m.state
raceDisable()
}
if atomic.LoadInt32(&wg.counter) == 0 {
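
For the sync types the hazard is an unbalanced raceDisable/raceEnable
pair rather than a held lock: a nil method call after raceDisable()
leaves the detector switched off for the rest of the program. The
"_ = c.m.state" / "_ = rw.w.state" lines move the panic ahead of
raceDisable(). A minimal sketch of the same idea, where disable/enable
and the types are hypothetical stand-ins for the package's internal
race hooks, not the real sync sources:

	package main

	// disabled stands in for the race detector's disable depth;
	// disable/enable stand in for raceDisable/raceEnable.
	var disabled int

	func disable() { disabled++ }
	func enable()  { disabled-- }

	type mutexSketch struct{ state int32 }

	type condSketch struct{ m *mutexSketch }

	// wait sketches the fixed pattern from Cond.Wait: reading
	// c.m.state panics on a nil receiver before disable() runs, so a
	// recovered panic cannot leave the detector permanently disabled.
	func (c *condSketch) wait() {
		_ = c.m.state // a nil receiver panics here, detector still enabled
		disable()
		// ... block on the condition, then reacquire ...
		enable()
	}

	func main() {
		defer func() {
			if recover() != nil && disabled != 0 {
				panic("detector left disabled")
			}
		}()
		var c *condSketch
		c.wait()
	}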