// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
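
	// A worked example of the rounding above (illustrative, not part of
	// the original source): on Linux _StackSystem is 0, so _FixedStack0
	// is 2048, already a power of two, and _FixedStack stays 2048. On
	// windows/amd64, _StackSystem is 512*8 = 4096, so _FixedStack0 is
	// 6144; subtracting one and OR-ing in the shifted copies fills in
	// all lower bits (6143 -> 8191), and adding one rounds up to the
	// next power of two, 8192.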

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
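
	// Illustrative arithmetic (not part of the original source): on a
	// typical non-race linux/amd64 build, sys.StackGuardMultiplier is 1
	// and _StackSystem is 0, so _StackGuard is 880 and
	// _StackLimit = 880 - 0 - 128 = 752 bytes usable by a chain of
	// NOSPLIT functions.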
103)
104
Russ Coxd98553a2014-11-11 17:04:34 -0500105const (
Alex Brainman031c3bc2015-05-01 15:53:45 +1000106 // stackDebug == 0: no logging
Russ Coxd98553a2014-11-11 17:04:34 -0500107 // == 1: logging of per-stack operations
108 // == 2: logging of per-frame operations
109 // == 3: logging of per-word updates
110 // == 4: logging of per-word reads
111 stackDebug = 0
112 stackFromSystem = 0 // allocate stacks from system memory instead of the heap
113 stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
114 stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
Austin Clements47542522017-05-18 14:35:53 -0400115 stackNoCache = 0 // disable per-P small stack caches
Keith Randall1ea60c12016-12-02 15:17:52 -0800116
117 // check the BP links during traceback.
118 debugCheckBP = false
Russ Coxd98553a2014-11-11 17:04:34 -0500119)
120
121const (
Michael Matloob432cb662015-11-11 12:39:30 -0500122 uintptrMask = 1<<(8*sys.PtrSize) - 1
Russ Coxd98553a2014-11-11 17:04:34 -0500123
124 // Goroutine preemption request.
Russ Coxe6d35112015-01-05 16:29:21 +0000125 // Stored into g->stackguard0 to cause split stack check failure.
Russ Coxd98553a2014-11-11 17:04:34 -0500126 // Must be greater than any real sp.
127 // 0xfffffade in hex.
128 stackPreempt = uintptrMask & -1314
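	// For reference (illustrative, not part of the original source):
	// with 64-bit pointers uintptrMask is 1<<64 - 1, so stackPreempt
	// works out to 0xfffffffffffffade; on 32-bit systems it is
	// 0xfffffade, matching the comment above.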

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
// order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
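
// As an illustration (not part of the original source): with
// _FixedStack = 2048 and _NumStackOrders = 4, as on a typical
// linux/amd64 build, stackpool holds free lists for 2 KB, 4 KB, 8 KB,
// and 16 KB stacks (orders 0 through 3); anything larger is served by
// stackLarge or allocated directly from the heap.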

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
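
// For example (illustrative only): stacklog2(1) = 0, stacklog2(4) = 2,
// and stacklog2(7) = 2, since the result is the floor of log base 2.
// stackalloc uses it to turn a large stack's page count into an index
// into stackLarge.free.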

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
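		// Illustrative note (not in the original source): the loop
		// above halves n2 until it reaches _FixedStack, so for
		// n = 8192 with _FixedStack = 2048 the order is 2, selecting
		// the free list of 2048<<2 = 8192-byte stacks.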
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
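
// To illustrate the adjustment (example values only, not from the
// original source): if the old stack occupied [0xc000, 0xd000) and the
// new stack occupies [0x1c000, 0x1d000), then adjinfo.delta is 0x10000
// and a stack pointer such as 0xc840 is rewritten to 0x1c840; values
// outside the old range are left untouched.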

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
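
// For example (illustrative only): ptrbit(&bv, 10) reads bit 2 of
// bv.bytedata[1]; a result of 1 means the word at offset 10*sys.PtrSize
// from the start of the described region holds a live pointer, 0 means
// it holds a scalar.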

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) != 1 {
			continue
		}
		pp := (*uintptr)(add(scanp, i*sys.PtrSize))
	retry:
		p := *pp
		if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
			// Looks like a junk value in a pointer slot.
			// Live analysis wrong?
			getg().m.traceback = 2
			print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
			throw("invalid pointer found on stack")
		}
		if minp <= p && p < maxp {
			if stackDebug >= 3 {
				print("adjust ptr ", hex(p), " ", funcname(f), "\n")
			}
			if useCAS {
				ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
				if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
					goto retry
				}
			} else {
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
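
// For example (illustrative only): round2(1) = 1, round2(5) = 8, and
// round2(8) = 8. Keeping requested stack sizes at powers of two is
// what lets stackalloc's n&(n-1) != 0 check reject bad sizes.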

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}
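
	// Illustrative numbers (not from the original source): with an
	// 8192-byte stack and _StackLimit = 752 on a typical non-race
	// linux/amd64 build, the stack only shrinks to 4096 bytes if
	// gp.stack.hi - gp.sched.sp + 752 is below 8192/4 = 2048, i.e. the
	// goroutine has less than roughly 1.3 KB of stack in active use.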

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}