// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
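
	// The or-and-shift cascade above smears the highest set bit of
	// _FixedStack0-1 into every lower bit, so adding 1 rounds up to
	// the next power of two. A standalone sketch of the same trick
	// (roundPow2 is a hypothetical helper, not a runtime function):
	//
	//	func roundPow2(x uintptr) uintptr {
	//		x--
	//		x |= x >> 1
	//		x |= x >> 2
	//		x |= x >> 4
	//		x |= x >> 8
	//		x |= x >> 16
	//		return x + 1
	//	}
	//
	// With _StackSystem == 0, _FixedStack0 = 2048 (0x800) is already
	// a power of two and passes through unchanged; a hypothetical
	// _FixedStack0 of 3072 (0xC00) would round up to 4096.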

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
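
// Plugging in concrete values (assuming sys.StackGuardMultiplier == 1,
// as in a non-race build, and _StackSystem == 0, as on linux/amd64):
// _StackGuard = 880, so a chain of NOSPLIT functions may use at most
// _StackLimit = 880 - 0 - 128 = 752 bytes.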

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
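
// Why these values work: -1314 in two's complement ends in ...fade, so
// stackPreempt is 0xfffffade on 32-bit systems and 0xfffffffffffffade
// on 64-bit ones. Either way it is larger than any real stack pointer,
// so the prologue comparison against stackguard0 always fails and the
// function calls morestack, which recognizes the sentinel.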

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
// order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}
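
// For example, assuming the usual 8 KB runtime page (_PageShift == 13,
// defined in malloc.go): a freed 32 KB stack spans 4 pages and so is
// kept on stackLarge.free[stacklog2(4)] = stackLarge.free[2].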

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
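
// A few sample values: stacklog2(1) == 0, stacklog2(8) == 3, and
// stacklog2(9) == 3, since the result is the floor of log_2(n).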

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
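
// Concretely, assuming _StackCacheSize is 32 KB (its value in malloc.go)
// and order 0 stacks of _FixedStack = 2 KB: a refill grabs eight stacks
// to reach the 16 KB half-capacity mark, and stackcacherelease below
// drains back down to the same mark, so each acquisition of stackpoolmu
// transfers stacks in bulk rather than one at a time.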

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
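
// Tracing the small-stack path above, assuming _FixedStack == 2048: a
// request for n = 8192 halves n2 twice before it reaches _FixedStack,
// yielding order 2, i.e. a stack from the free list whose elements are
// _FixedStack<<2 = 8192 bytes. Because n is checked to be a power of
// two, the computed order always matches n exactly.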

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
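
// A worked example with made-up addresses: if the old stack is
// [0x1000, 0x2000) and the new one is [0x7000, 0x8000), then
// delta = 0x8000 - 0x2000 = 0x6000, and a saved pointer 0x1abc is
// rewritten to 0x7abc, the same offset from the high end of the new
// stack. Values outside [old.lo, old.hi) are left untouched, which is
// what makes it safe to apply adjustpointer to every candidate slot.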

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
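
// The bitmap is little-endian within each byte: bit i lives in
// bytedata[i/8] at bit position i%8. For example, if bytedata[0] is
// 0b00000101, words 0 and 2 of the frame hold pointers and word 1
// holds a scalar.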

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) != 1 {
			continue
		}
		pp := (*uintptr)(add(scanp, i*sys.PtrSize))
	retry:
		p := *pp
		if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
			// Looks like a junk value in a pointer slot.
			// Live analysis wrong?
			getg().m.traceback = 2
			print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
			throw("invalid pointer found on stack")
		}
		if minp <= p && p < maxp {
			if stackDebug >= 3 {
				print("adjust ptr ", hex(p), " ", funcname(f), "\n")
			}
			if useCAS {
				ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
				if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
					goto retry
				}
			} else {
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
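
// Sample values: round2(1) == 1, round2(5) == 8, round2(8) == 8.
// Unlike the _FixedStack bit-smearing cascade near the top of this
// file, this helper simply loops over shift counts, which is fine for
// the small values it is given.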

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
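	// (stackPreempt is a sentinel guard value, not a real stack
	// bound: it makes every function prologue's stack check fail,
	// which is how a preemption request steers a running goroutine
	// into newstack in the first place.)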
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack costs a word (the pushed return address).
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
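		// (scang waits for the goroutine to leave _Grunning, so
		// the status change below is what allows a concurrent
		// stack scan to proceed, or lets us take over the scan
		// ourselves just below.)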
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like the goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
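	// Growing by doubling keeps the total copying work amortized
	// linear: the bytes copied across all of a goroutine's grow
	// steps sum to less than its final stack size.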
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}
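	// (maxstacksize is adjustable via runtime/debug.SetMaxStack;
	// by default it allows stacks up to 1 GB on 64-bit systems
	// and 250 MB on 32-bit systems.)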

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

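// nilfunc is the entry point installed by gostartcallfn for a nil
// function value; calling it faults immediately with a nil pointer
// dereference.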
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// gostartcallfn adjusts Gobuf as if it executed a call to fn
// and then did an immediate gosave.
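// This is how a new goroutine is bootstrapped: newproc1 points the
// g's sched.pc at goexit and then calls gostartcallfn, so the first
// gogo into the g begins executing fn, and fn's eventual return
// unwinds into goexit.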
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// shrinkstack attempts to shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free the whole stack; it will be reallocated
			// if the G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the stack below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
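	// For example, an 8 KB stack is shrunk to 4 KB only if less
	// than 2 KB of it (guard space included) is in use.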
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
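// Stack memory is manually managed (allocated with allocManual and
// never swept by the garbage collector), so spans with no stacks
// outstanding must be returned to the heap explicitly here.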
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
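	// (stackpool holds one free list per small stack size class:
	// order 0 is _FixedStack bytes, and each higher order doubles
	// that. allocCount == 0 means no stacks from the span are
	// still in use.)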
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

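// morestackc is the stack-growth target for go:systemstack functions.
// Their prologues check the stack bound against stackguard1, which is
// poisoned (set to ^0) on ordinary goroutine stacks, so reaching here
// means system-stack-only code was entered on a user stack.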
//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}