runtime, syscall: add calls to msan functions
Add explicit memory sanitizer instrumentation to the runtime and syscall
packages. The compiler does not instrument the runtime package. It
does instrument the syscall package, but there are a few memory
accesses there that the compiler cannot see and that need explicit
msan calls.
Change-Id: I2d66073f713fe67e33a6720460d2bb8f72f31394
Reviewed-on: https://go-review.googlesource.com/16164
Reviewed-by: David Crawshaw <crawshaw@golang.org>
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index cfee12a..96ac306 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -108,6 +108,9 @@
if raceenabled {
raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
}
+ if msanenabled {
+ msanread(ep, t.elem.size)
+ }
if c == nil {
if !block {
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go
index 2db73bc..d59ad29 100644
--- a/src/runtime/hashmap.go
+++ b/src/runtime/hashmap.go
@@ -276,6 +276,9 @@
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
if h == nil || h.count == 0 {
return atomicloadp(unsafe.Pointer(&zeroptr))
}
@@ -324,6 +327,9 @@
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
if h == nil || h.count == 0 {
return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
@@ -419,6 +425,10 @@
raceReadObjectPC(t.key, key, callerpc, pc)
raceReadObjectPC(t.elem, val, callerpc, pc)
}
+ if msanenabled {
+ msanread(key, t.key.size)
+ msanread(val, t.elem.size)
+ }
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
@@ -517,6 +527,9 @@
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
if h == nil || h.count == 0 {
return
}
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index 646f878..f04cec0 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -132,6 +132,9 @@
if raceenabled {
raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2E))
}
+ if msanenabled {
+ msanread(elem, t.size)
+ }
ep := (*eface)(unsafe.Pointer(&e))
if isDirectIface(t) {
ep._type = t
@@ -153,6 +156,9 @@
if raceenabled {
raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2I))
}
+ if msanenabled {
+ msanread(elem, t.size)
+ }
tab := (*itab)(atomicloadp(unsafe.Pointer(cache)))
if tab == nil {
tab = getitab(inter, t, false)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 4ce159c..564e229 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -707,6 +707,9 @@
if raceenabled {
racemalloc(x, size)
}
+ if msanenabled {
+ msanmalloc(x, size)
+ }
mp.mallocing = 0
releasem(mp)
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 0dbe1ff..6ca2672 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -241,6 +241,10 @@
racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
}
+ if msanenabled {
+ msanwrite(dstp, uintptr(n)*typ.size)
+ msanread(srcp, uintptr(n)*typ.size)
+ }
// Note: No point in checking typ.kind&kindNoPointers here:
// compiler only emits calls to typedslicecopy for types with pointers,
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 9468af9..7c7f1e8 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -233,6 +233,9 @@
if debug.allocfreetrace != 0 {
tracefree(unsafe.Pointer(p), size)
}
+ if msanenabled {
+ msanfree(unsafe.Pointer(p), size)
+ }
// Reset to allocated+noscan.
if cl == 0 {
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 2477637..e4ca940 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2695,6 +2695,9 @@
if raceenabled {
racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
}
+ if msanenabled {
+ msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
+ }
}
}
return gp
diff --git a/src/runtime/select.go b/src/runtime/select.go
index b18b44c..8b6c3ed 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -459,6 +459,13 @@
raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
}
}
+ if msanenabled {
+ if cas.kind == caseRecv && cas.elem != nil {
+ msanwrite(cas.elem, c.elemtype.size)
+ } else if cas.kind == caseSend {
+ msanread(cas.elem, c.elemtype.size)
+ }
+ }
selunlock(sel)
goto retc
@@ -472,6 +479,9 @@
raceacquire(chanbuf(c, c.recvx))
racerelease(chanbuf(c, c.recvx))
}
+ if msanenabled && cas.elem != nil {
+ msanwrite(cas.elem, c.elemtype.size)
+ }
if cas.receivedp != nil {
*cas.receivedp = true
}
@@ -504,6 +514,9 @@
racerelease(chanbuf(c, c.sendx))
raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
}
+ if msanenabled {
+ msanread(cas.elem, c.elemtype.size)
+ }
typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
c.sendx++
if c.sendx == c.dataqsiz {
@@ -531,6 +544,9 @@
}
racesync(c, sg)
}
+ if msanenabled && cas.elem != nil {
+ msanwrite(cas.elem, c.elemtype.size)
+ }
selunlock(sel)
if debugSelect {
print("syncrecv: sel=", sel, " c=", c, "\n")
@@ -570,6 +586,9 @@
raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
racesync(c, sg)
}
+ if msanenabled {
+ msanread(cas.elem, c.elemtype.size)
+ }
selunlock(sel)
if debugSelect {
print("syncsend: sel=", sel, " c=", c, "\n")
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index b316cdd..a0b0a7c 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -57,6 +57,9 @@
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadrangepc(old.array, uintptr(old.len*int(t.elem.size)), callerpc, funcPC(growslice))
}
+ if msanenabled {
+ msanread(old.array, uintptr(old.len*int(t.elem.size)))
+ }
et := t.elem
if et.size == 0 {
@@ -127,6 +130,10 @@
racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
}
+ if msanenabled {
+ msanwrite(to.array, uintptr(n*int(width)))
+ msanread(fm.array, uintptr(n*int(width)))
+ }
size := uintptr(n) * width
if size == 1 { // common case worth about 2x to do here
@@ -153,6 +160,9 @@
pc := funcPC(slicestringcopy)
racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc)
}
+ if msanenabled {
+ msanwrite(unsafe.Pointer(&to[0]), uintptr(n))
+ }
memmove(unsafe.Pointer(&to[0]), unsafe.Pointer(stringStructOf(&fm).str), uintptr(n))
return n
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 1809a4d9a..e5cd094 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -364,6 +364,9 @@
if raceenabled {
racemalloc(v, uintptr(n))
}
+ if msanenabled {
+ msanmalloc(v, uintptr(n))
+ }
if stackDebug >= 1 {
print(" allocated ", v, "\n")
}
@@ -393,6 +396,9 @@
}
return
}
+ if msanenabled {
+ msanfree(v, n)
+ }
if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
diff --git a/src/runtime/string.go b/src/runtime/string.go
index 680001d..03230a8 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -86,6 +86,9 @@
getcallerpc(unsafe.Pointer(&b)),
funcPC(slicebytetostring))
}
+ if msanenabled && l > 0 {
+ msanread(unsafe.Pointer(&b[0]), uintptr(l))
+ }
s, c := rawstringtmp(buf, l)
copy(c, b)
return s
@@ -126,6 +129,9 @@
getcallerpc(unsafe.Pointer(&b)),
funcPC(slicebytetostringtmp))
}
+ if msanenabled && len(b) > 0 {
+ msanread(unsafe.Pointer(&b[0]), uintptr(len(b)))
+ }
return *(*string)(unsafe.Pointer(&b))
}
@@ -185,6 +191,9 @@
getcallerpc(unsafe.Pointer(&a)),
funcPC(slicerunetostring))
}
+ if msanenabled && len(a) > 0 {
+ msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
+ }
var dum [4]byte
size1 := 0
for _, r := range a {