// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

var testMemStatsCount int

func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
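
// A minimal sketch (ours, not part of the original test) of the Sys
// cross-check performed at the end of TestMemStats, phrased against the
// exported runtime.MemStats API: Sys should equal the sum of the
// per-subsystem XSys fields.
func sysBreakdownConsistent(st *runtime.MemStats) bool {
	sum := st.HeapSys + st.StackSys + st.MSpanSys + st.MCacheSys +
		st.BuckHashSys + st.GCSys + st.OtherSys
	return st.Sys == sum
}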

func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}
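
// A companion sketch (hypothetical; name and shape are ours) that measures
// the same concatenation as a benchmark. b.ReportAllocs should surface the
// single allocation per iteration that TestStringConcatenationAllocs
// asserts.
func BenchmarkStringConcatenationSketch(b *testing.B) {
	b.ReportAllocs()
	var s string
	for i := 0; i < b.N; i++ {
		buf := make([]byte, 10)
		for j := 0; j < 10; j++ {
			buf[j] = byte(j) + '0'
		}
		s = "foo" + string(buf)
	}
	_ = s
}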

func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}
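
// A hypothetical helper (not used by the test above) spelling out the
// masking arithmetic: p &^ 7 clears the low three address bits, mapping
// every pointer to the base of its 8-byte chunk, so two 1-byte allocations
// packed together by the tiny allocator map to the same key.
func chunkOf(p unsafe.Pointer) uintptr {
	return uintptr(p) &^ 7
}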

// Sinks for TestTinyAllocIssue37262. Assigning to globals forces the
// test's allocations onto the heap, where the tiny allocator handles them.
var (
	tinyByteSink   *byte
	tinyUint32Sink *uint32
	tinyObj12Sink  *obj12
)

type obj12 struct {
	a uint64
	b uint32
}

func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	// Try to cause an alignment access fault
	// by atomically accessing the first 64-bit
	// value of a tiny-allocated object.
	// See issue 37262 for details.

	// GC twice, once to reach a stable heap state
	// and again to make sure we finish the sweep phase.
	runtime.GC()
	runtime.GC()

	// Make 1-byte allocations until we get a fresh tiny slot.
	aligned := false
	for i := 0; i < 16; i++ {
		tinyByteSink = new(byte)
		if uintptr(unsafe.Pointer(tinyByteSink))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the current
	// tiny slot is partially filled.
	tinyUint32Sink = new(uint32)

	// Create a 12-byte object, which fits into the
	// tiny slot. If it actually gets placed there,
	// then the field "a" will be improperly aligned
	// for atomic access on 32-bit architectures.
	// This won't be true if issue 36606 gets resolved.
	tinyObj12Sink = new(obj12)

	// Try to atomically access tinyObj12Sink.a.
	atomic.StoreUint64(&tinyObj12Sink.a, 10)

	// Clear the sinks.
	tinyByteSink = nil
	tinyUint32Sink = nil
	tinyObj12Sink = nil
}
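
// A sketch (ours) of the size arithmetic that makes obj12 interesting for
// issue 37262: on common 32-bit platforms such as 386 and arm, uint64 is
// only 4-byte aligned, so obj12 is 12 bytes and can land in a 16-byte tiny
// slot with its "a" field misaligned for 64-bit atomics.
func obj12Size() uintptr {
	return unsafe.Sizeof(obj12{}) // 12 on 32-bit, 16 on 64-bit platforms
}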

func TestPageCacheLeak(t *testing.T) {
	// Run on a single P and restore the previous GOMAXPROCS on exit.
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t| got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}
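
// mallocSink keeps the XOR of the benchmark's allocated pointers reachable
// so the compiler cannot prove the allocations dead and elide them.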
var mallocSink uintptr

func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		x ^= uintptr(unsafe.Pointer(&p[0]))
	}
	mallocSink = x
}

var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}
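
// benchHelper drives the BenchmarkGoroutine* benchmarks above: it parks n
// reader goroutines on channels, wakes them each iteration, and times only
// the GC() call, so the benchmark measures GC cost in the presence of many
// goroutines rather than the channel traffic itself.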
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}
461}