runtime: disable scavenger on 64k page size kernels
Update #9993
If the physical page size of the machine is larger than the logical
heap page size, for example 8k logical and 64k physical, then
madvise(2) will round the requested length up to a 64k boundary and
may discard pages adjacent to the one being madvised.
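As a rough illustration (not part of this patch), the sketch below
shows the rounding arithmetic with made-up values: an 8k logical heap
page released inside a 64k physical page. The sizes are hypothetical;
the point is only that rounding the length up to the physical page
size pulls seven neighbouring heap pages into the affected range.

	package main

	import "fmt"

	func main() {
		const (
			logicalPage  = 8 << 10  // Go heap page size in this example
			physicalPage = 64 << 10 // kernel page size on e.g. ppc64
		)

		length := uintptr(logicalPage) // one unused 8k heap page
		// The kernel applies the advice to whole physical pages, so the
		// length is effectively rounded up to the next 64k boundary.
		rounded := (length + physicalPage - 1) &^ uintptr(physicalPage-1)

		fmt.Printf("requested %d bytes, kernel affects %d bytes\n", length, rounded)
		fmt.Printf("neighbouring heap pages at risk: %d\n", (rounded-length)/logicalPage)
	}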
This patch disables the scavenger in these situations, which at the
moment means only ppc64 and ppc64le systems. NaCl also uses a 64k
page size, but it is not clear whether it is affected by this problem.
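For reference (also not part of the change), a small program along
these lines can confirm whether a given kernel uses pages larger than
the runtime's 8k heap pages; os.Getpagesize reports the kernel's page
size, and the 8k constant is only an illustrative stand-in for the
runtime's _PageSize.

	package main

	import (
		"fmt"
		"os"
	)

	func main() {
		const logicalHeapPage = 8 << 10 // stand-in for the runtime's logical heap page size

		phys := os.Getpagesize() // page size reported by the kernel
		fmt.Printf("physical page size: %d bytes\n", phys)
		if phys > logicalHeapPage {
			fmt.Println("physical pages exceed heap pages; the scavenger would be disabled here")
		}
	}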
Change-Id: Ib897f8d3df5bd915ddc0b510f2fd90a30ef329ca
Reviewed-on: https://go-review.googlesource.com/6091
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
diff --git a/src/runtime/debug/garbage_test.go b/src/runtime/debug/garbage_test.go
index 54c33bd..a392614 100644
--- a/src/runtime/debug/garbage_test.go
+++ b/src/runtime/debug/garbage_test.go
@@ -88,6 +88,10 @@
 var big = make([]byte, 1<<20)
 
 func TestFreeOSMemory(t *testing.T) {
+	switch runtime.GOARCH {
+	case "ppc64", "ppc64le", "nacl":
+		t.Skip("issue 9993; scavenger temporarily disabled on systems with 64k pages")
+	}
 	var ms1, ms2 runtime.MemStats
 	if big == nil {
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index a05a570..94ef4de 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -717,6 +717,15 @@
 }
 
 func scavengelist(list *mspan, now, limit uint64) uintptr {
+	if _PhysPageSize > _PageSize {
+		// golang.org/issue/9993
+		// If the physical page size of the machine is larger than
+		// our logical heap page size the kernel may round up the
+		// amount to be freed to its page size and corrupt the heap
+		// pages surrounding the unused block.
+		return 0
+	}
+
 	if mSpanList_IsEmpty(list) {
 		return 0
 	}