[dev.power64] all: merge default into dev.power64

This brings dev.power64 up-to-date with the current tip of
default.  go_bootstrap is still panicking with a bad defer
when initializing the runtime (even on amd64).

LGTM=rsc
R=rsc
CC=golang-codereviews
https://golang.org/cl/152570049
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 75d155b..91b5da2 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -24,6 +24,24 @@
 	panic(divideError)
 }
 
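+// As with panicdivide above, each of the helpers below panics with a
+// canonical, preallocated runtime error value, so callers need not
+// construct one.
+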
+var overflowError = error(errorString("integer overflow"))
+
+func panicoverflow() {
+	panic(overflowError)
+}
+
+var floatError = error(errorString("floating point error"))
+
+func panicfloat() {
+	panic(floatError)
+}
+
+var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
+
+func panicmem() {
+	panic(memoryError)
+}
+
 func throwreturn() {
 	gothrow("no return at end of a typed function - compiler is broken")
 }
@@ -72,18 +90,31 @@
 	// been set and must not be clobbered.
 }
 
-// Each P holds pool for defers with arg sizes 8, 24, 40, 56 and 72 bytes.
-// Memory block is 40 (24 for 32 bits) bytes larger due to Defer header.
-// This maps exactly to malloc size classes.
+// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
+// Each P holds a pool for defers with small arg sizes.
+// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
+
+const (
+	deferHeaderSize = unsafe.Sizeof(_defer{})
+	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
+	minDeferArgs    = minDeferAlloc - deferHeaderSize
+)
 
 // defer size class for arg size sz
+//go:nosplit
 func deferclass(siz uintptr) uintptr {
-	return (siz + 7) >> 4
+	if siz <= minDeferArgs {
+		return 0
+	}
+	return (siz - minDeferArgs + 15) / 16
 }
 
 // total size of memory block for defer with arg size sz
 func totaldefersize(siz uintptr) uintptr {
-	return (unsafe.Sizeof(_defer{}) - unsafe.Sizeof(_defer{}.args)) + round(siz, ptrSize)
+	if siz <= minDeferArgs {
+		return minDeferAlloc
+	}
+	return deferHeaderSize + siz
 }
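+
+// For example, assuming a hypothetical 40-byte _defer header (the real
+// value is unsafe.Sizeof(_defer{})): minDeferAlloc is 48 and minDeferArgs
+// is 8, so arg sizes 0-8 share class 0 in 48-byte blocks, while sizes 9-24
+// map to class 1 and their totals (49-64 bytes) round up to 64.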
 
 // Ensure that defer arg sizes that map to the same defer size class
@@ -111,6 +142,21 @@
 	}
 }
 
+// The arguments associated with a deferred call are stored
+// immediately after the _defer header in memory.
+//go:nosplit
+func deferArgs(d *_defer) unsafe.Pointer {
+	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
+}
+
+var deferType *_type // type of _defer struct
+
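+// init recovers the _type descriptor for _defer by storing a nil *_defer
+// in an interface: the interface's type word is the *ptrtype describing
+// *_defer, and its elem field is the _defer type, which mallocgc needs
+// when allocating a new defer.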
+func init() {
+	var x interface{}
+	x = (*_defer)(nil)
+	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
+}
+
 // Allocate a Defer, usually using per-P pool.
 // Each defer must be released with freedefer.
 // Note: runs on M stack
@@ -126,12 +172,11 @@
 		}
 	}
 	if d == nil {
-		// deferpool is empty or just a big defer
+		// Allocate new defer+args.
 		total := goroundupsize(totaldefersize(uintptr(siz)))
-		d = (*_defer)(gomallocgc(total, conservative, 0))
+		d = (*_defer)(mallocgc(total, deferType, 0))
 	}
 	d.siz = siz
-	d.special = false
 	gp := mp.curg
 	d.link = gp._defer
 	gp._defer = d
@@ -141,22 +186,37 @@
 
 // Free the given defer.
 // The defer cannot be used after this call.
+//go:nosplit
 func freedefer(d *_defer) {
-	if d.special {
-		return
+	if d._panic != nil {
+		freedeferpanic()
+	}
+	if d.fn != nil {
+		freedeferfn()
 	}
 	sc := deferclass(uintptr(d.siz))
 	if sc < uintptr(len(p{}.deferpool)) {
 		mp := acquirem()
 		pp := mp.p
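+		// Wipe the defer before pooling it so a pooled entry does not
+		// retain argp/pc/fn/args pointers.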
+		*d = _defer{}
 		d.link = pp.deferpool[sc]
 		pp.deferpool[sc] = d
 		releasem(mp)
-		// No need to wipe out pointers in argp/pc/fn/args,
-		// because we empty the pool before GC.
 	}
 }
 
+// Separate function so that it can split the stack.
+// Windows otherwise runs out of stack space.
+func freedeferpanic() {
+	// _panic must be cleared before d is unlinked from gp.
+	gothrow("freedefer with d._panic != nil")
+}
+
+func freedeferfn() {
+	// fn must be cleared before d is unlinked from gp.
+	gothrow("freedefer with d.fn != nil")
+}
+
 // Run a deferred function if there is one.
 // The compiler inserts a call to this at the end of any
 // function which calls defer.
@@ -187,8 +247,9 @@
 	// won't know the form of the arguments until the jmpdefer can
 	// flip the PC over to fn.
 	mp := acquirem()
-	memmove(unsafe.Pointer(argp), unsafe.Pointer(&d.args), uintptr(d.siz))
+	memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
 	fn := d.fn
+	d.fn = nil
 	gp._defer = d.link
 	freedefer(d)
 	releasem(mp)
@@ -196,7 +257,8 @@
 }
 
 // Goexit terminates the goroutine that calls it.  No other goroutine is affected.
-// Goexit runs all deferred calls before terminating the goroutine.
+// Goexit runs all deferred calls before terminating the goroutine.  Because Goexit
+// is not a panic, any recover calls in those deferred functions will return nil.
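+//
+// For example (an illustrative sketch, not part of this change), a deferred
+// function still runs during Goexit but its recover observes no panic:
+//
+//	defer func() {
+//		if r := recover(); r == nil {
+//			// reached: defers run during Goexit, recover returns nil
+//		}
+//	}()
+//	runtime.Goexit()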
 //
 // Calling Goexit from the main goroutine terminates that goroutine
 // without func main returning. Since func main has not returned,
@@ -204,13 +266,240 @@
 // If all other goroutines exit, the program crashes.
 func Goexit() {
 	// Run all deferred functions for the current goroutine.
+	// This code is similar to gopanic; see that implementation
+	// for detailed comments.
 	gp := getg()
-	for gp._defer != nil {
+	for {
 		d := gp._defer
+		if d == nil {
+			break
+		}
+		if d.started {
+			if d._panic != nil {
+				d._panic.aborted = true
+				d._panic = nil
+			}
+			d.fn = nil
+			gp._defer = d.link
+			freedefer(d)
+			continue
+		}
+		d.started = true
+		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+		if gp._defer != d {
+			gothrow("bad defer entry in Goexit")
+		}
+		d._panic = nil
+		d.fn = nil
 		gp._defer = d.link
-		reflectcall(unsafe.Pointer(d.fn), unsafe.Pointer(&d.args), uint32(d.siz), uint32(d.siz))
 		freedefer(d)
 		// Note: we ignore recovers here because Goexit isn't a panic
 	}
 	goexit()
 }
+
+func canpanic(*g) bool
+
+// Print all currently active panics.  Used when crashing.
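+// The oldest panic prints first; each subsequent panic is prefixed with a
+// single tab, e.g. (values illustrative):
+//
+//	panic: one [recovered]
+//		panic: two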
+func printpanics(p *_panic) {
+	if p.link != nil {
+		printpanics(p.link)
+		print("\t")
+	}
+	print("panic: ")
+	printany(p.arg)
+	if p.recovered {
+		print(" [recovered]")
+	}
+	print("\n")
+}
+
+// The implementation of the predeclared function panic.
+func gopanic(e interface{}) {
+	gp := getg()
+	if gp.m.curg != gp {
+		gothrow("panic on m stack")
+	}
+
+	// m.softfloat is set during software floating point.
+	// It increments m.locks to avoid preemption.
+	// We moved the memory loads out, so there shouldn't be
+	// any reason for it to panic anymore.
+	if gp.m.softfloat != 0 {
+		gp.m.locks--
+		gp.m.softfloat = 0
+		gothrow("panic during softfloat")
+	}
+	if gp.m.mallocing != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic during malloc")
+	}
+	if gp.m.gcing != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic during gc")
+	}
+	if gp.m.locks != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic holding locks")
+	}
+
+	var p _panic
+	p.arg = e
+	p.link = gp._panic
+	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+
+	for {
+		d := gp._defer
+		if d == nil {
+			break
+		}
+
+		// If the defer was started by an earlier panic or Goexit (and, since we're
+		// back here, that triggered a new panic), take the defer off the list.
+		// The earlier panic or Goexit will not continue running.
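+		// For example (hypothetical sequence): panic P1 starts defer D, and
+		// D itself panics with P2. The unwind for P2 reaches this loop, finds
+		// D already started, marks P1 aborted so a later recover of P2 drops
+		// it from the list, and removes D so it does not run twice.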
+		if d.started {
+			if d._panic != nil {
+				d._panic.aborted = true
+			}
+			d._panic = nil
+			d.fn = nil
+			gp._defer = d.link
+			freedefer(d)
+			continue
+		}
+
+		// Mark defer as started, but keep on list, so that traceback
+		// can find and update the defer's argument frame if stack growth
+		// or a garbage collection happens before reflectcall starts executing d.fn.
+		d.started = true
+
+		// Record the panic that is running the defer.
+		// If there is a new panic during the deferred call, that panic
+		// will find d in the list and will mark d._panic (this panic) aborted.
+		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))
+
+		p.argp = unsafe.Pointer(getargp(0))
+		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+		p.argp = nil
+
+		// reflectcall did not panic. Remove d.
+		if gp._defer != d {
+			gothrow("bad defer entry in panic")
+		}
+		d._panic = nil
+		d.fn = nil
+		gp._defer = d.link
+
+		// trigger shrinkage to test stack copy.  See stack_test.go:TestStackPanic
+		//GC()
+
+		pc := d.pc
+		argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy
+		freedefer(d)
+		if p.recovered {
+			gp._panic = p.link
+			// Aborted panics are marked but remain on the g.panic list.
+			// Remove them from the list.
+			for gp._panic != nil && gp._panic.aborted {
+				gp._panic = gp._panic.link
+			}
+			if gp._panic == nil { // must be done with signal
+				gp.sig = 0
+			}
+			// Pass information about recovering frame to recovery.
+			gp.sigcode0 = uintptr(argp)
+			gp.sigcode1 = pc
+			mcall(recovery_m)
+			gothrow("recovery failed") // mcall should not return
+		}
+	}
+
+	// ran out of deferred calls - old-school panic now
+	startpanic()
+	printpanics(gp._panic)
+	dopanic(0)       // should not return
+	*(*int)(nil) = 0 // not reached
+}
+
+// getargp returns the location where the caller
+// writes outgoing function call arguments.
+//go:nosplit
+func getargp(x int) uintptr {
+	// x is an argument mainly so that we can return its address.
+	// However, we need to make the function complex enough
+	// that it won't be inlined. We always pass x = 0, so this code
+	// does nothing other than keep the compiler from thinking
+	// the function is simple enough to inline.
+	if x > 0 {
+		return getcallersp(unsafe.Pointer(&x)) * 0
+	}
+	return uintptr(noescape(unsafe.Pointer(&x)))
+}
+
+// The implementation of the predeclared function recover.
+// Cannot split the stack because it needs to reliably
+// find the stack segment of its caller.
+//
+// TODO(rsc): Once we commit to CopyStackAlways,
+// this doesn't need to be nosplit.
+//go:nosplit
+func gorecover(argp uintptr) interface{} {
+	// Must be in a function running as part of a deferred call during the panic.
+	// Must be called from the topmost function of the call
+	// (the function used in the defer statement).
+	// p.argp is the argument pointer of that topmost deferred function call.
+	// Compare against argp reported by caller.
+	// If they match, the caller is the one who can recover.
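+	//
+	// For example (illustrative), with
+	//	defer func() { helper() }()
+	// a recover inside helper reports an argp that does not match p.argp,
+	// so it returns nil; only a recover called directly by the deferred
+	// function matches.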
+	gp := getg()
+	p := gp._panic
+	if p != nil && !p.recovered && argp == uintptr(p.argp) {
+		p.recovered = true
+		return p.arg
+	}
+	return nil
+}
+
+//go:nosplit
+func startpanic() {
+	onM_signalok(startpanic_m)
+}
+
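+// dopanic hands its arguments to dopanic_m through the M's ptrarg/scalararg
+// slots, the word-passing convention used for onM calls.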
+//go:nosplit
+func dopanic(unused int) {
+	gp := getg()
+	mp := acquirem()
+	mp.ptrarg[0] = unsafe.Pointer(gp)
+	mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused))
+	mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
+	onM_signalok(dopanic_m) // should never return
+	*(*int)(nil) = 0
+}
+
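+// throw takes a C-style *byte string (printed via gostringnocopy); gothrow
+// below is the Go-string equivalent.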
+//go:nosplit
+func throw(s *byte) {
+	gp := getg()
+	if gp.m.throwing == 0 {
+		gp.m.throwing = 1
+	}
+	startpanic()
+	print("fatal error: ", gostringnocopy(s), "\n")
+	dopanic(0)
+	*(*int)(nil) = 0 // not reached
+}
+
+//go:nosplit
+func gothrow(s string) {
+	print("fatal error: ", s, "\n")
+	gp := getg()
+	if gp.m.throwing == 0 {
+		gp.m.throwing = 1
+	}
+	startpanic()
+	dopanic(0)
+	*(*int)(nil) = 0 // not reached
+}