// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

var indexError = error(errorString("index out of range"))

func panicindex() {
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

func panicslice() {
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panic(memoryError)
}

func throwreturn() {
	gothrow("no return at end of a typed function - compiler is broken")
}

func throwinit() {
	gothrow("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. deferproc_m does that. Until deferproc_m,
	// we can only call nosplit routines.
	argp := uintptr(unsafe.Pointer(&fn))
	argp += unsafe.Sizeof(fn)
	if GOARCH == "arm" {
		argp += ptrSize // skip caller's saved link register
	}
	mp := acquirem()
	mp.scalararg[0] = uintptr(siz)
	mp.ptrarg[0] = unsafe.Pointer(fn)
	mp.scalararg[1] = argp
	mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz))

	if mp.curg != getg() {
		// go code on the m stack can't defer
		gothrow("defer on m")
	}

	onM(deferproc_m)

	releasem(mp)

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
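
// A hedged sketch of the lowering described above (illustrative, not the
// compiler's literal output): for a statement like
//
//	defer f(x)
//
// the generated code behaves roughly as
//
//	evaluate x into the outgoing argument area
//	if deferproc(siz, f) != 0 {
//		goto end // reached when a deferred call recovers a panic
//	}
//	...
//	end:
//		deferreturn()
//		RET
//
// which is why nothing may follow the return0() call above.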

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)
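
// Worked example with a hypothetical header size (the real value depends
// on the _defer layout and the architecture): if unsafe.Sizeof(_defer{})
// were 40 on a 64-bit system, then
//
//	deferHeaderSize = 40
//	minDeferAlloc   = (40 + 15) &^ 15 = 48
//	minDeferArgs    = 48 - 40 = 8
//
// so up to 8 bytes of arguments would ride along in the minimum block.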

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}
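
// Continuing the hypothetical minDeferArgs == 8 example above:
// deferclass(0..8) == 0, deferclass(9..24) == 1, deferclass(25..40) == 2,
// so each class covers one 16-byte stride of total block size.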

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := goroundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			gothrow("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
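
// Illustrative memory layout of a defer block (header size is
// unsafe.Sizeof(_defer{}); the argument byte count is d.siz):
//
//	d --> +----------------+
//	      | _defer header  |
//	      +----------------+ <- deferArgs(d)
//	      | argument bytes |    copied in by deferproc_m,
//	      +----------------+    copied back out by deferreturn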

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
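
// The conversion above leans on the runtime's interface representation:
// an interface{} holding a *_defer is a (type, data) word pair whose first
// word points at the type descriptor for *_defer. Reading &x as a **ptrtype
// and taking .elem therefore recovers the _defer type descriptor itself.
// Sketch of the assumed layout:
//
//	x: [ *ptrtype for *_defer | data word (nil here) ]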

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on M stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p
		d = pp.deferpool[sc]
		if d != nil {
			pp.deferpool[sc] = d.link
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := goroundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//go:nosplit
func freedefer(d *_defer) {
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p
		*d = _defer{}
		d.link = pp.deferpool[sc]
		pp.deferpool[sc] = d
		releasem(mp)
	}
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	argp := uintptr(unsafe.Pointer(&arg0))
	if d.argp != argp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	gp._defer = d.link
	freedefer(d)
	releasem(mp)
	jmpdefer(fn, argp)
}
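
// Illustrative control flow, assuming hypothetical f defers g and then h
// (h was deferred last, so it runs first):
//
//	f's epilogue calls deferreturn
//	  -> jmpdefer(h, argp)  // h appears to be called by f's epilogue
//	h returns -> deferreturn runs again
//	  -> jmpdefer(g, argp)
//	g returns -> deferreturn runs again, finds no matching defer, returns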

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			gothrow("bad defer entry in Goexit")
		}
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit()
}
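
// Minimal usage sketch (user code, not part of this file):
//
//	go func() {
//		defer fmt.Println("still runs") // deferred calls execute
//		runtime.Goexit()
//		fmt.Println("never reached")
//	}()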

func canpanic(*g) bool

// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		gothrow("panic on m stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		gothrow("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		gothrow("panic during malloc")
	}
	if gp.m.gcing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		gothrow("panic during gc")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		gothrow("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			gothrow("bad defer entry in panic")
		}
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(argp)
			gp.sigcode1 = pc
			mcall(recovery_m)
			gothrow("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
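
// Illustrative nesting handled by the loop above (hypothetical user code):
//
//	func f() {
//		defer func() { recover() }() // runs second, recovers panic(2)
//		defer g()                    // runs first
//		panic(1)                     // starts running g's defer entry
//	}
//	func g() { panic(2) }
//
// When g panics, f's defer entry for g is already marked started, so
// panic(1) is marked aborted and the entry is removed; after the recover
// of panic(2), the aborted panic(1) is unlinked by the loop over
// gp._panic above.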

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
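
// A consequence of the argp check above (hypothetical user code):
//
//	defer func() { recover() }() // recovers: recover is called directly
//	                             // by the deferred function, argp matches
//	defer func() { h() }()       // h calls recover: argp does not match
//	                             // the deferred frame, recover returns nil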

//go:nosplit
func startpanic() {
	onM_signalok(startpanic_m)
}

//go:nosplit
func dopanic(unused int) {
	gp := getg()
	mp := acquirem()
	mp.ptrarg[0] = unsafe.Pointer(gp)
	mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused))
	mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
	onM_signalok(dopanic_m) // should never return
	*(*int)(nil) = 0
}

//go:nosplit
func throw(s *byte) {
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	print("fatal error: ", gostringnocopy(s), "\n")
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}

//go:nosplit
func gothrow(s string) {
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	print("fatal error: ", s, "\n")
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}