// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

var indexError = error(errorString("index out of range"))

func panicindex() {
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

func panicslice() {
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panic(memoryError)
}
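
// These helpers are not called from user Go code; the compiler and the
// runtime's fault handling emit calls to them when a runtime check fails.
// As a rough illustration (schematic, not literal compiler output), an
// indexing expression such as
//
//	_ = s[i]
//
// is guarded by something like
//
//	if uint(i) >= uint(len(s)) {
//		panicindex()
//	}
//
// so an out-of-range index panics with the indexError value above.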

func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}

func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
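
// As a sketch of how the pieces fit together (schematic pseudocode, not
// literal compiler output), a function such as
//
//	func f() {
//		defer g(x)
//		...
//	}
//
// is lowered to roughly
//
//	func f() {
//		if deferproc(siz, funcval-for-g, x...) != 0 {
//			goto ret // a deferred func recovered a panic; skip to the epilogue
//		}
//		...
//	ret:
//		deferreturn(...)
//		return
//	}
//
// deferreturn (below) then runs the queued calls for this frame in LIFO order.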

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}
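
// For illustration, with a hypothetical 40-byte _defer header:
// minDeferAlloc would be 48 (40 rounded up to a multiple of 16) and
// minDeferArgs would be 8, so arg sizes 0-8 map to class 0, 9-24 to class 1,
// 25-40 to class 2, and so on. The real header size is whatever
// unsafe.Sizeof(_defer{}) evaluates to on the target platform.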

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
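
// The memory layout of an allocated defer is, schematically:
//
//	+----------------+---------------------------+
//	| _defer header  | siz bytes of copied args  |
//	+----------------+---------------------------+
//	^ d              ^ deferArgs(d)
//
// deferproc copies the caller's argument bytes into the trailing region and
// deferreturn copies them back out just before jumping to the deferred fn.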

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
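
// The conversion above leans on the runtime's interface representation: an
// empty interface value is a (type pointer, data pointer) pair, so
// reinterpreting &x as **ptrtype reads the *_defer type descriptor, and its
// elem field is the _type for _defer itself. newdefer passes that type to
// mallocgc so the allocation carries precise pointer information for the GC.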

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to systemstack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
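
// For example, if f defers g and then h, gp._defer holds h then g (newest
// first), both with d.sp equal to f's frame. The first deferreturn call runs
// h via jmpdefer, jmpdefer re-enters deferreturn, which runs g, and the next
// re-entry finds either no defer or one whose d.sp belongs to a caller's
// frame, so it returns and f's real epilogue finally executes.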

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
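
// A minimal illustration of the documented behavior (hypothetical user code,
// not part of the runtime):
//
//	go func() {
//		defer func() {
//			fmt.Println("deferred ran; recover() =", recover()) // prints <nil>
//		}()
//		runtime.Goexit() // deferred calls run, goroutine exits, no panic
//	}()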

// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
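
// The argp comparison is why recover only takes effect when called directly
// from a deferred function. A typical (hypothetical) use looks like:
//
//	defer func() {
//		if r := recover(); r != nil { // recover called by the deferred func itself
//			println("recovered:", r)
//		}
//	}()
//
// If recover is instead called by a helper one frame deeper, its argp no
// longer matches p.argp and it returns nil.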

//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}

//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}

//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}