blob: 7eb2d6055a82c2f299a593cb22c566f5a57a95cd [file] [log] [blame]
Keith Randall523aa932014-08-18 13:26:28 -07001// Copyright 2014 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime
6
Keith Randallf4407372014-09-03 08:49:43 -07007import "unsafe"
8
// indexError is created once at package init so panicindex does not
// allocate when it fires.
var indexError = error(errorString("index out of range"))

// panicindex panics with the shared "index out of range" error.
func panicindex() {
	panic(indexError)
}
14
// sliceError is created once at package init so panicslice does not
// allocate when it fires.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice panics with the shared "slice bounds out of range" error.
func panicslice() {
	panic(sliceError)
}
Keith Randallf4407372014-09-03 08:49:43 -070020
// divideError is created once at package init so panicdivide does not
// allocate when it fires.
var divideError = error(errorString("integer divide by zero"))

// panicdivide panics with the shared "integer divide by zero" error.
func panicdivide() {
	panic(divideError)
}
26
// overflowError is created once at package init so panicoverflow does not
// allocate when it fires.
var overflowError = error(errorString("integer overflow"))

// panicoverflow panics with the shared "integer overflow" error.
func panicoverflow() {
	panic(overflowError)
}
32
// floatError is created once at package init so panicfloat does not
// allocate when it fires.
var floatError = error(errorString("floating point error"))

// panicfloat panics with the shared "floating point error" error.
func panicfloat() {
	panic(floatError)
}
38
// memoryError is created once at package init so panicmem does not
// allocate when it fires.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem panics with the shared nil-dereference error.
func panicmem() {
	panic(memoryError)
}
44
// throwreturn crashes the runtime; per its message it is reached only
// when compiler-generated code falls off the end of a typed function.
func throwreturn() {
	gothrow("no return at end of a typed function - compiler is broken")
}
48
// throwinit crashes the runtime; per its message it is reached only on
// a recursive call during initialization (linker skew).
func throwinit() {
	gothrow("recursive call during initialization - linker skew")
}
52
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. deferproc_m does that. Until deferproc_m,
	// we can only call nosplit routines.
	argp := uintptr(unsafe.Pointer(&fn))
	argp += unsafe.Sizeof(fn)
	if GOARCH == "arm" {
		argp += ptrSize // skip caller's saved link register
	}
	// Pass the arguments to deferproc_m through the M's scratch
	// slots; we cannot allocate or grow the stack here (see above).
	mp := acquirem()
	mp.scalararg[0] = uintptr(siz)
	mp.ptrarg[0] = unsafe.Pointer(fn)
	mp.scalararg[1] = argp
	mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz))

	if mp.curg != getg() {
		// go code on the m stack can't defer
		gothrow("defer on m")
	}

	onM(deferproc_m)

	releasem(mp)

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
92
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})         // bytes occupied by the _defer header itself
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15    // header rounded up to a multiple of 16
	minDeferArgs    = minDeferAlloc - deferHeaderSize // arg bytes that fit for free in the minimum allocation
)
Keith Randallf4407372014-09-03 08:49:43 -0700102
103// defer size class for arg size sz
Russ Cox857d55a2014-09-08 17:37:49 -0400104//go:nosplit
Keith Randallf4407372014-09-03 08:49:43 -0700105func deferclass(siz uintptr) uintptr {
Russ Coxf95beae2014-09-16 10:36:38 -0400106 if siz <= minDeferArgs {
107 return 0
108 }
109 return (siz - minDeferArgs + 15) / 16
Keith Randallf4407372014-09-03 08:49:43 -0700110}
111
112// total size of memory block for defer with arg size sz
113func totaldefersize(siz uintptr) uintptr {
Russ Coxf95beae2014-09-16 10:36:38 -0400114 if siz <= minDeferArgs {
115 return minDeferAlloc
116 }
117 return deferHeaderSize + siz
Keith Randallf4407372014-09-03 08:49:43 -0700118}
119
// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	// m[class] records the rounded malloc size first seen for that
	// defer class, or -1 if no arg size has mapped to it yet.
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	// Walk arg sizes upward until we pass the last pooled class.
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := goroundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		// Two arg sizes in the same defer class must round to the
		// same malloc size, or pooled reuse would be wrong.
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			gothrow("bad defer size class")
		}
	}
}
144
// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
// deferArgs returns a pointer to that argument area.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
151
// deferType is the type descriptor for _defer, passed to mallocgc by
// newdefer so the allocation has the right pointer layout.
var deferType *_type // type of _defer struct

func init() {
	// Recover the *_type for _defer by boxing a (*_defer)(nil) in an
	// interface and reading the pointee type out of the interface's
	// ptrtype descriptor.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
159
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on M stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		// Small enough for a pooled size class: try to reuse a
		// defer previously freed by this P.
		pp := mp.p
		d = pp.deferpool[sc]
		if d != nil {
			pp.deferpool[sc] = d.link
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := goroundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	// Push the new defer onto the current goroutine's defer list.
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}
186
// Free the given defer.
// The defer cannot be used after this call.
// Small defers are returned to the owning P's pool; defers too large
// for any pool class are simply left for the garbage collector.
//go:nosplit
func freedefer(d *_defer) {
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p
		// Zero the defer before pooling so it drops its references
		// (fn, _panic, link) instead of retaining them while pooled.
		*d = _defer{}
		d.link = pp.deferpool[sc]
		pp.deferpool[sc] = d
		releasem(mp)
	}
}
201
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	// Only run the defer if it belongs to the caller's frame:
	// its recorded argp must match the caller's argument pointer.
	argp := uintptr(unsafe.Pointer(&arg0))
	if d.argp != argp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	// Copy the deferred call's saved arguments into the caller's frame.
	memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	gp._defer = d.link
	freedefer(d)
	releasem(mp)
	jmpdefer(fn, argp)
}
239
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was started by an earlier panic; mark that
			// panic aborted and discard the defer without rerunning it.
			if d._panic != nil {
				d._panic.aborted = true
			}
			gp._defer = d.link
			freedefer(d)
			continue
		}
		// Mark started but leave on the list while it runs (see gopanic).
		d.started = true
		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			gothrow("bad defer entry in Goexit")
		}
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit()
}
Russ Coxc81a0ed2014-09-08 14:05:23 -0400277
278func canpanic(*g) bool
Keith Randall3a3d47d2014-09-08 12:33:08 -0700279
// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	// Recurse to the end of the list first so the oldest panic
	// prints first; nested panics are indented with a tab.
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
293
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		gothrow("panic on m stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		gothrow("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		gothrow("panic during malloc")
	}
	if gp.m.gcing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		gothrow("panic during gc")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		gothrow("panic holding locks")
	}

	// Link a new _panic onto the front of the goroutine's panic list.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	// Run deferred calls until one recovers or the list is exhausted.
	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			gothrow("bad defer entry in panic")
		}
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		// Save what recovery needs before freeing d.
		pc := d.pc
		argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(argp)
			gp.sigcode1 = pc
			mcall(recovery_m)
			gothrow("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
401
// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		// Never taken for x = 0; the *0 keeps the result harmless
		// even if it were.
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}
416
// The implementation of the predeclared function recover.
// Returns nil when there is no panic eligible to be recovered.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
439
// startpanic begins a fatal panic by running startpanic_m on the m stack.
// NOTE(review): it uses onM_signalok rather than onM — presumably so it
// is safe to call from a signal handler; confirm against onM_signalok.
//go:nosplit
func startpanic() {
	onM_signalok(startpanic_m)
}
444
// dopanic crashes the program via dopanic_m. It never returns.
//go:nosplit
func dopanic(unused int) {
	gp := getg()
	// Hand the crashing goroutine and the caller's PC/SP to
	// dopanic_m through the M's scratch slots, then switch stacks.
	mp := acquirem()
	mp.ptrarg[0] = unsafe.Pointer(gp)
	mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused))
	mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
	onM_signalok(dopanic_m) // should never return
	*(*int)(nil) = 0        // not reached
}
455
// throw reports a fatal runtime error from a C string and crashes.
// It does not return.
//go:nosplit
func throw(s *byte) {
	gp := getg()
	// Record that this M is throwing (only on the first throw).
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	print("fatal error: ", gostringnocopy(s), "\n")
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
467
// gothrow reports a fatal runtime error from a Go string and crashes.
// It does not return. Go counterpart of throw.
//go:nosplit
func gothrow(s string) {
	gp := getg()
	// Record that this M is throwing (only on the first throw).
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	print("fatal error: ", s, "\n")
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}