// run

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test heap sampling logic.

package main

import (
	"fmt"
	"math"
	"runtime"
)

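// Sinks for the test's allocations. Storing each new object in a
// package-level variable keeps it reachable, so the allocations cannot be
// optimized away.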
var a16 *[16]byte
var a512 *[512]byte
var a256 *[256]byte
var a1k *[1024]byte
var a64k *[64 * 1024]byte

// This test checks that heap sampling produces reasonable
// results. Note that heap sampling uses randomization, so the results
// vary from run to run. This test only checks that the resulting
// values appear reasonable.
func main() {
	const countInterleaved = 10000
	allocInterleaved(countInterleaved)
	checkAllocations(getMemProfileRecords(), "main.allocInterleaved", countInterleaved, []int64{64 * 1024, 1024, 64 * 1024, 512, 64 * 1024, 256})

	const count = 100000
	alloc(count)
	checkAllocations(getMemProfileRecords(), "main.alloc", count, []int64{1024, 512, 256})
}

// allocInterleaved stress-tests the heap sampling logic by
// interleaving large and small allocations.
func allocInterleaved(n int) {
	for i := 0; i < n; i++ {
		// Test verification depends on these lines being contiguous.
		a64k = new([64 * 1024]byte)
		a1k = new([1024]byte)
		a64k = new([64 * 1024]byte)
		a512 = new([512]byte)
		a64k = new([64 * 1024]byte)
		a256 = new([256]byte)
	}
}

// alloc performs only small allocations for sanity testing.
func alloc(n int) {
	for i := 0; i < n; i++ {
		// Test verification depends on these lines being contiguous.
		a1k = new([1024]byte)
		a512 = new([512]byte)
		a256 = new([256]byte)
	}
}

// checkAllocations validates that the profile records collected for
// the named function are consistent with count contiguous allocations
// of the specified sizes.
func checkAllocations(records []runtime.MemProfileRecord, fname string, count int64, size []int64) {
	a := allocObjects(records, fname)
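	// The allocation sites in the tested function occupy contiguous source
	// lines (the alloc functions depend on this), so the smallest recorded
	// line number is the first allocation site and size[i] corresponds to
	// line firstLine+i.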
	firstLine := 0
	for ln := range a {
		if firstLine == 0 || firstLine > ln {
			firstLine = ln
		}
	}
	var totalcount int64
	for i, w := range size {
		ln := firstLine + i
		s := a[ln]
		checkValue(fname, ln, "objects", count, s.objects)
		checkValue(fname, ln, "bytes", count*w, s.bytes)
		totalcount += s.objects
	}
	// Check the total number of allocations, to ensure some sampling occurred.
	if totalwant := count * int64(len(size)); totalcount <= 0 || totalcount > totalwant*1024 {
		panic(fmt.Sprintf("%s want total count > 0 && <= %d, got %d", fname, totalwant*1024, totalcount))
	}
}

// checkValue checks an unsampled value against a range.
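// The accepted range is deliberately loose (anything from 0 up to 1024x the
// expected value), since sampling is randomized and the test only rejects
// clearly implausible results.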
func checkValue(fname string, ln int, name string, want, got int64) {
	if got < 0 || got > 1024*want {
		panic(fmt.Sprintf("%s:%d want %s >= 0 && <= %d, got %d", fname, ln, name, 1024*want, got))
	}
}

func getMemProfileRecords() []runtime.MemProfileRecord {
	// Force the runtime to update the object and byte counts.
	// This can take up to two GC cycles to get a complete
	// snapshot of the current point in time.
	runtime.GC()
	runtime.GC()

	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
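	// The second argument to MemProfile (inuseZero=true) asks for records even
	// when all of their objects have since been freed; only the cumulative
	// AllocBytes/AllocObjects counts are examined by this test.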
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}
	return p
}

type allocStat struct {
	bytes, objects int64
}

// allocObjects examines the profile records for the named function
// and returns the allocation stats aggregated by source line number.
func allocObjects(records []runtime.MemProfileRecord, function string) map[int]allocStat {
	a := make(map[int]allocStat)
	for _, r := range records {
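		// Stack0 is a fixed-size array holding the record's call stack;
		// unused trailing entries are zero.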
		for _, s := range r.Stack0 {
			if s == 0 {
				break
			}
			if f := runtime.FuncForPC(s); f != nil {
				name := f.Name()
				_, line := f.FileLine(s)
				if name == function {
					allocStat := a[line]
					allocStat.bytes += r.AllocBytes
					allocStat.objects += r.AllocObjects
					a[line] = allocStat
				}
			}
		}
	}
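	// The recorded values are sampled. Scale them up to estimates of the true
	// totals using the current profiling rate.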
	for line, stats := range a {
		objects, bytes := scaleHeapSample(stats.objects, stats.bytes, int64(runtime.MemProfileRate))
		a[line] = allocStat{bytes, objects}
	}
	return a
}

// scaleHeapSample unsamples heap allocations.
// Taken from src/cmd/pprof/internal/profile/legacy_profile.go
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}

	if rate <= 1 {
		// if rate==1 all samples were collected so no adjustment is needed.
		// if rate<1 treat as unknown and skip scaling.
		return count, size
	}

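	// Heap samples are collected by a Poisson process with an average of one
	// sample per rate bytes allocated, so an allocation of size s appears in
	// the profile with probability 1-exp(-s/rate). Dividing the sampled totals
	// by that probability, computed for the average size at this site, yields
	// an estimate of the true totals.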
	avgSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))

	return int64(float64(count) * scale), int64(float64(size) * scale)
}