| // Copyright 2014 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| package runtime |
| |
| import ( |
| "internal/cpu" |
| "runtime/internal/atomic" |
| "runtime/internal/sys" |
| "unsafe" |
| ) |
| |
| // Functions called by C code. |
| //go:linkname main |
| //go:linkname goparkunlock |
| //go:linkname newextram |
| //go:linkname acquirep |
| //go:linkname releasep |
| //go:linkname incidlelocked |
| //go:linkname ginit |
| //go:linkname schedinit |
| //go:linkname ready |
| //go:linkname stopm |
| //go:linkname handoffp |
| //go:linkname wakep |
| //go:linkname stoplockedm |
| //go:linkname schedule |
| //go:linkname execute |
| //go:linkname goexit1 |
| //go:linkname reentersyscall |
| //go:linkname reentersyscallblock |
| //go:linkname exitsyscall |
| //go:linkname gfget |
| //go:linkname kickoff |
| //go:linkname mstart1 |
| //go:linkname mexit |
| //go:linkname globrunqput |
| //go:linkname pidleget |
| |
| // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go). |
| //go:linkname getm |
| |
| // Function called by misc/cgo/test. |
| //go:linkname lockedOSThread |
| |
| // C functions for thread and context management. |
| func newosproc(*m) |
| |
| //go:noescape |
| func malg(bool, bool, *unsafe.Pointer, *uintptr) *g |
| |
| //go:noescape |
| func resetNewG(*g, *unsafe.Pointer, *uintptr) |
| func gogo(*g) |
| func setGContext() |
| func makeGContext(*g, unsafe.Pointer, uintptr) |
| func getTraceback(me, gp *g) |
| func gtraceback(*g) |
| func _cgo_notify_runtime_init_done() |
| func alreadyInCallers() bool |
| func stackfree(*g) |
| |
| // Functions created by the compiler. |
| //extern __go_init_main |
| func main_init() |
| |
| //extern main.main |
| func main_main() |
| |
| var buildVersion = sys.TheVersion |
| |
| // set using cmd/go/internal/modload.ModInfoProg |
| var modinfo string |
| |
| // Goroutine scheduler |
| // The scheduler's job is to distribute ready-to-run goroutines over worker threads. |
| // |
| // The main concepts are: |
| // G - goroutine. |
| // M - worker thread, or machine. |
| // P - processor, a resource that is required to execute Go code. |
// M must have an associated P to execute Go code; however, it can be
// blocked or in a syscall without an associated P.
| // |
| // Design doc at https://golang.org/s/go11sched. |
| |
| // Worker thread parking/unparking. |
| // We need to balance between keeping enough running worker threads to utilize |
| // available hardware parallelism and parking excessive running worker threads |
| // to conserve CPU resources and power. This is not simple for two reasons: |
| // (1) scheduler state is intentionally distributed (in particular, per-P work |
| // queues), so it is not possible to compute global predicates on fast paths; |
| // (2) for optimal thread management we would need to know the future (don't park |
// a worker thread when a new goroutine will be readied in the near future).
| // |
| // Three rejected approaches that would work badly: |
| // 1. Centralize all scheduler state (would inhibit scalability). |
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
// is a spare P, unpark a thread and hand it the goroutine.
// This would lead to thread state thrashing, as the thread that readied the
// goroutine can be out of work the very next moment, and we would need to park it.
// Also, it would destroy locality of computation, as we want to keep
// dependent goroutines on the same thread; and it would introduce additional latency.
| // 3. Unpark an additional thread whenever we ready a goroutine and there is an |
| // idle P, but don't do handoff. This would lead to excessive thread parking/ |
| // unparking as the additional threads will instantly park without discovering |
| // any work to do. |
| // |
| // The current approach: |
// We unpark an additional thread when we ready a goroutine if there is an
// idle P and there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue
// or the netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
| // Threads unparked this way are also considered spinning; we don't do goroutine |
| // handoff so such threads are out of work initially. Spinning threads do some |
| // spinning looking for work in per-P run queues before parking. If a spinning |
| // thread finds work it takes itself out of the spinning state and proceeds to |
| // execution. If it does not find work it takes itself out of the spinning state |
| // and then parks. |
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
| // new threads when readying goroutines. To compensate for that, if the last spinning |
| // thread finds work and stops spinning, it must unpark a new spinning thread. |
| // This approach smooths out unjustified spikes of thread unparking, |
| // but at the same time guarantees eventual maximal CPU parallelism utilization. |
| // |
| // The main implementation complication is that we need to be very careful during |
| // spinning->non-spinning thread transition. This transition can race with submission |
| // of a new goroutine, and either one part or another needs to unpark another worker |
| // thread. If they both fail to do that, we can end up with semi-persistent CPU |
| // underutilization. The general pattern for goroutine readying is: submit a goroutine |
| // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning. |
| // The general pattern for spinning->non-spinning transition is: decrement nmspinning, |
| // #StoreLoad-style memory barrier, check all per-P work queues for new work. |
// Note that all this complexity does not apply to the global run queue, as we are not
// sloppy about thread unparking when submitting to the global queue. Also see comments
| // for nmspinning manipulation. |
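//
// A rough sketch of the two racing paths described above (illustrative only;
// the actual logic lives in wakep, resetspinning and findrunnable):
//
//	// Readying a goroutine:
//	runqput(pp, gp, next)              // submit to the local work queue
//	// StoreLoad-style memory barrier
//	if atomic.Load(&sched.nmspinning) == 0 && an idle P is available {
//		wakep()
//	}
//
//	// Spinning -> non-spinning transition:
//	atomic.Xadd(&sched.nmspinning, -1)
//	// StoreLoad-style memory barrier
//	// re-check all per-P run queues; if work is found, wakep() again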
| |
| var ( |
| m0 m |
| g0 g |
| mcache0 *mcache |
| raceprocctx0 uintptr |
| ) |
| |
| // main_init_done is a signal used by cgocallbackg that initialization |
| // has been completed. It is made before _cgo_notify_runtime_init_done, |
| // so all cgo calls can rely on it existing. When main_init is complete, |
| // it is closed, meaning cgocallbackg can reliably receive from it. |
| var main_init_done chan bool |
| |
| // mainStarted indicates that the main M has started. |
| var mainStarted bool |
| |
| // runtimeInitTime is the nanotime() at which the runtime started. |
| var runtimeInitTime int64 |
| |
| // Value to use for signal mask for newly created M's. |
| var initSigmask sigset |
| |
| // The main goroutine. |
| func main(unsafe.Pointer) { |
| g := getg() |
| |
| // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. |
| // Using decimal instead of binary GB and MB because |
| // they look nicer in the stack overflow failure message. |
| if sys.PtrSize == 8 { |
| maxstacksize = 1000000000 |
| } else { |
| maxstacksize = 250000000 |
| } |
| |
| // An upper limit for max stack size. Used to avoid random crashes |
| // after calling SetMaxStack and trying to allocate a stack that is too big, |
| // since stackalloc works with 32-bit sizes. |
| // Not used by gofrontend. |
| // maxstackceiling = 2 * maxstacksize |
| |
| // Allow newproc to start new Ms. |
| mainStarted = true |
| |
| if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon |
		// For runtime_syscall_doAllThreadsSyscall, we
		// record that sysmon is starting and is not yet
		// ready for the world to be stopped.
| atomic.Store(&sched.sysmonStarting, 1) |
| systemstack(func() { |
| newm(sysmon, nil, -1) |
| }) |
| } |
| |
| // Lock the main goroutine onto this, the main OS thread, |
| // during initialization. Most programs won't care, but a few |
| // do require certain calls to be made by the main thread. |
| // Those can arrange for main.main to run in the main thread |
| // by calling runtime.LockOSThread during initialization |
| // to preserve the lock. |
| lockOSThread() |
| |
| if g.m != &m0 { |
| throw("runtime.main not on m0") |
| } |
| m0.doesPark = true |
| |
| // Record when the world started. |
| // Must be before doInit for tracing init. |
| runtimeInitTime = nanotime() |
| if runtimeInitTime == 0 { |
| throw("nanotime returning zero") |
| } |
| |
| if debug.inittrace != 0 { |
| inittrace.id = getg().goid |
| inittrace.active = true |
| } |
| |
| // doInit(&runtime_inittask) // Must be before defer. |
| |
| // Defer unlock so that runtime.Goexit during init does the unlock too. |
| needUnlock := true |
| defer func() { |
| if needUnlock { |
| unlockOSThread() |
| } |
| }() |
| |
| main_init_done = make(chan bool) |
| if iscgo { |
| // Start the template thread in case we enter Go from |
| // a C-created thread and need to create a new thread. |
| startTemplateThread() |
| _cgo_notify_runtime_init_done() |
| } |
| |
| fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime |
| fn() |
| createGcRootsIndex() |
| |
| // For gccgo we have to wait until after main is initialized |
| // to enable GC, because initializing main registers the GC roots. |
| gcenable() |
| |
| // Disable init tracing after main init done to avoid overhead |
	// of collecting statistics in malloc and newproc.
| inittrace.active = false |
| |
| close(main_init_done) |
| |
| needUnlock = false |
| unlockOSThread() |
| |
| if isarchive || islibrary { |
| // A program compiled with -buildmode=c-archive or c-shared |
| // has a main, but it is not executed. |
| return |
| } |
| fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime |
| fn() |
| if raceenabled { |
| racefini() |
| } |
| |
| // Make racy client program work: if panicking on |
| // another goroutine at the same time as main returns, |
| // let the other goroutine finish printing the panic trace. |
| // Once it does, it will exit. See issues 3934 and 20018. |
| if atomic.Load(&runningPanicDefers) != 0 { |
| // Running deferred functions should not take long. |
| for c := 0; c < 1000; c++ { |
| if atomic.Load(&runningPanicDefers) == 0 { |
| break |
| } |
| Gosched() |
| } |
| } |
| if atomic.Load(&panicking) != 0 { |
| gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1) |
| } |
| |
| exit(0) |
| for { |
| var x *int32 |
| *x = 0 |
| } |
| } |
| |
| // os_beforeExit is called from os.Exit(0). |
| //go:linkname os_beforeExit os.runtime__beforeExit |
| func os_beforeExit() { |
| if raceenabled { |
| racefini() |
| } |
| } |
| |
| // start forcegc helper goroutine |
| func init() { |
| expectSystemGoroutine() |
| go forcegchelper() |
| } |
| |
| func forcegchelper() { |
| setSystemGoroutine() |
| |
| forcegc.g = getg() |
| lockInit(&forcegc.lock, lockRankForcegc) |
| for { |
| lock(&forcegc.lock) |
| if forcegc.idle != 0 { |
| throw("forcegc: phase error") |
| } |
| atomic.Store(&forcegc.idle, 1) |
| goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1) |
| // this goroutine is explicitly resumed by sysmon |
| if debug.gctrace > 0 { |
| println("GC forced") |
| } |
| // Time-triggered, fully concurrent. |
| gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()}) |
| } |
| } |
| |
| //go:nosplit |
| |
| // Gosched yields the processor, allowing other goroutines to run. It does not |
| // suspend the current goroutine, so execution resumes automatically. |
| func Gosched() { |
| checkTimeouts() |
| mcall(gosched_m) |
| } |
| |
| // goschedguarded yields the processor like gosched, but also checks |
| // for forbidden states and opts out of the yield in those cases. |
| //go:nosplit |
| func goschedguarded() { |
| mcall(goschedguarded_m) |
| } |
| |
| // Puts the current goroutine into a waiting state and calls unlockf on the |
| // system stack. |
| // |
| // If unlockf returns false, the goroutine is resumed. |
| // |
| // unlockf must not access this G's stack, as it may be moved between |
| // the call to gopark and the call to unlockf. |
| // |
| // Note that because unlockf is called after putting the G into a waiting |
| // state, the G may have already been readied by the time unlockf is called |
| // unless there is external synchronization preventing the G from being |
| // readied. If unlockf returns false, it must guarantee that the G cannot be |
| // externally readied. |
| // |
| // Reason explains why the goroutine has been parked. It is displayed in stack |
| // traces and heap dumps. Reasons should be unique and descriptive. Do not |
// re-use reasons; add new ones.
| func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) { |
| if reason != waitReasonSleep { |
| checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy |
| } |
| mp := acquirem() |
| gp := mp.curg |
| status := readgstatus(gp) |
| if status != _Grunning && status != _Gscanrunning { |
| throw("gopark: bad g status") |
| } |
| mp.waitlock = lock |
| mp.waitunlockf = unlockf |
| gp.waitreason = reason |
| mp.waittraceev = traceEv |
| mp.waittraceskip = traceskip |
| releasem(mp) |
| // can't do anything that might move the G between Ms here. |
| mcall(park_m) |
| } |
| |
| // Puts the current goroutine into a waiting state and unlocks the lock. |
| // The goroutine can be made runnable again by calling goready(gp). |
| func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) { |
| gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) |
| } |
| |
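// goready marks gp ready to run. It switches to the system stack to call
// ready, which makes gp runnable and places it on the current P's run queue.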
| func goready(gp *g, traceskip int) { |
| systemstack(func() { |
| ready(gp, traceskip, true) |
| }) |
| } |
| |
| //go:nosplit |
| func acquireSudog() *sudog { |
| // Delicate dance: the semaphore implementation calls |
| // acquireSudog, acquireSudog calls new(sudog), |
| // new calls malloc, malloc can call the garbage collector, |
| // and the garbage collector calls the semaphore implementation |
| // in stopTheWorld. |
| // Break the cycle by doing acquirem/releasem around new(sudog). |
| // The acquirem/releasem increments m.locks during new(sudog), |
| // which keeps the garbage collector from being invoked. |
| mp := acquirem() |
| pp := mp.p.ptr() |
| if len(pp.sudogcache) == 0 { |
| lock(&sched.sudoglock) |
| // First, try to grab a batch from central cache. |
| for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { |
| s := sched.sudogcache |
| sched.sudogcache = s.next |
| s.next = nil |
| pp.sudogcache = append(pp.sudogcache, s) |
| } |
| unlock(&sched.sudoglock) |
| // If the central cache is empty, allocate a new one. |
| if len(pp.sudogcache) == 0 { |
| pp.sudogcache = append(pp.sudogcache, new(sudog)) |
| } |
| } |
| n := len(pp.sudogcache) |
| s := pp.sudogcache[n-1] |
| pp.sudogcache[n-1] = nil |
| pp.sudogcache = pp.sudogcache[:n-1] |
| if s.elem != nil { |
| throw("acquireSudog: found s.elem != nil in cache") |
| } |
| releasem(mp) |
| return s |
| } |
| |
| //go:nosplit |
| func releaseSudog(s *sudog) { |
| if s.elem != nil { |
| throw("runtime: sudog with non-nil elem") |
| } |
| if s.isSelect { |
| throw("runtime: sudog with non-false isSelect") |
| } |
| if s.next != nil { |
| throw("runtime: sudog with non-nil next") |
| } |
| if s.prev != nil { |
| throw("runtime: sudog with non-nil prev") |
| } |
| if s.waitlink != nil { |
| throw("runtime: sudog with non-nil waitlink") |
| } |
| if s.c != nil { |
| throw("runtime: sudog with non-nil c") |
| } |
| gp := getg() |
| if gp.param != nil { |
| throw("runtime: releaseSudog with non-nil gp.param") |
| } |
| mp := acquirem() // avoid rescheduling to another P |
| pp := mp.p.ptr() |
| if len(pp.sudogcache) == cap(pp.sudogcache) { |
| // Transfer half of local cache to the central cache. |
| var first, last *sudog |
| for len(pp.sudogcache) > cap(pp.sudogcache)/2 { |
| n := len(pp.sudogcache) |
| p := pp.sudogcache[n-1] |
| pp.sudogcache[n-1] = nil |
| pp.sudogcache = pp.sudogcache[:n-1] |
| if first == nil { |
| first = p |
| } else { |
| last.next = p |
| } |
| last = p |
| } |
| lock(&sched.sudoglock) |
| last.next = sched.sudogcache |
| sched.sudogcache = first |
| unlock(&sched.sudoglock) |
| } |
| pp.sudogcache = append(pp.sudogcache, s) |
| releasem(mp) |
| } |
| |
| // funcPC returns the entry PC of the function f. |
| // It assumes that f is a func value. Otherwise the behavior is undefined. |
| // CAREFUL: In programs with plugins, funcPC can return different values |
| // for the same function (because there are actually multiple copies of |
| // the same function in the address space). To be safe, don't use the |
| // results of this function in any == expression. It is only safe to |
| // use the result as an address at which to start executing code. |
| // |
| // For gccgo note that this differs from the gc implementation; the gc |
| // implementation adds sys.PtrSize to the address of the interface |
| // value, but GCC's alias analysis decides that that can not be a |
| // reference to the second field of the interface, and in some cases |
| // it drops the initialization of the second field as a dead store. |
| //go:nosplit |
| func funcPC(f interface{}) uintptr { |
| i := (*iface)(unsafe.Pointer(&f)) |
| r := *(*uintptr)(i.data) |
| if cpu.FunctionDescriptors { |
| // With PPC64 ELF ABI v1 function descriptors the |
| // function address is a pointer to a struct whose |
| // first field is the actual PC. |
| r = *(*uintptr)(unsafe.Pointer(r)) |
| } |
| return r |
| } |
| |
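// lockedOSThread reports whether the calling goroutine is locked to its
// OS thread. It is called from misc/cgo/test via the linkname above.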
| func lockedOSThread() bool { |
| gp := getg() |
| return gp.lockedm != 0 && gp.m.lockedg != 0 |
| } |
| |
| var ( |
| // allgs contains all Gs ever created (including dead Gs), and thus |
| // never shrinks. |
| // |
| // Access via the slice is protected by allglock or stop-the-world. |
| // Readers that cannot take the lock may (carefully!) use the atomic |
| // variables below. |
| allglock mutex |
| allgs []*g |
| |
	// allglen and allgptr are atomic variables that contain len(allgs) and
	// &allgs[0] respectively. Proper ordering depends on totally-ordered
| // loads and stores. Writes are protected by allglock. |
| // |
| // allgptr is updated before allglen. Readers should read allglen |
| // before allgptr to ensure that allglen is always <= len(allgptr). New |
| // Gs appended during the race can be missed. For a consistent view of |
| // all Gs, allglock must be held. |
| // |
| // allgptr copies should always be stored as a concrete type or |
| // unsafe.Pointer, not uintptr, to ensure that GC can still reach it |
| // even if it points to a stale array. |
| allglen uintptr |
| allgptr **g |
| ) |
| |
| func allgadd(gp *g) { |
| if readgstatus(gp) == _Gidle { |
| throw("allgadd: bad status Gidle") |
| } |
| |
| lock(&allglock) |
| allgs = append(allgs, gp) |
| if &allgs[0] != allgptr { |
| atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0])) |
| } |
| atomic.Storeuintptr(&allglen, uintptr(len(allgs))) |
| unlock(&allglock) |
| } |
| |
| // atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex. |
| func atomicAllG() (**g, uintptr) { |
| length := atomic.Loaduintptr(&allglen) |
| ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr))) |
| return ptr, length |
| } |
| |
| // atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG. |
| func atomicAllGIndex(ptr **g, i uintptr) *g { |
| return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize)) |
| } |
| |
| const ( |
| // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. |
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
| _GoidCacheBatch = 16 |
| ) |
| |
| // cpuinit extracts the environment variable GODEBUG from the environment on |
| // Unix-like operating systems and calls internal/cpu.Initialize. |
| func cpuinit() { |
| const prefix = "GODEBUG=" |
| var env string |
| |
| switch GOOS { |
| case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux": |
| cpu.DebugOptions = true |
| |
| // Similar to goenv_unix but extracts the environment value for |
| // GODEBUG directly. |
| // TODO(moehrmann): remove when general goenvs() can be called before cpuinit() |
| n := int32(0) |
| for argv_index(argv, argc+1+n) != nil { |
| n++ |
| } |
| |
| for i := int32(0); i < n; i++ { |
| p := argv_index(argv, argc+1+i) |
| s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)})) |
| |
| if hasPrefix(s, prefix) { |
| env = gostring(p)[len(prefix):] |
| break |
| } |
| } |
| } |
| |
| cpu.Initialize(env) |
| } |
| |
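// ginit binds the bootstrap m0 and g0 to each other and installs g0 as the
// current g. It is called from C code early in the bootstrap sequence,
// before schedinit.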
| func ginit() { |
| _m_ := &m0 |
| _g_ := &g0 |
| _m_.g0 = _g_ |
| _m_.curg = _g_ |
| _g_.m = _m_ |
| setg(_g_) |
| } |
| |
| // The bootstrap sequence is: |
| // |
| // call osinit |
| // call schedinit |
| // make & queue new G |
| // call runtime·mstart |
| // |
| // The new G calls runtime·main. |
| func schedinit() { |
| lockInit(&sched.lock, lockRankSched) |
| lockInit(&sched.sysmonlock, lockRankSysmon) |
| lockInit(&sched.deferlock, lockRankDefer) |
| lockInit(&sched.sudoglock, lockRankSudog) |
| lockInit(&deadlock, lockRankDeadlock) |
| lockInit(&paniclk, lockRankPanic) |
| lockInit(&allglock, lockRankAllg) |
| lockInit(&allpLock, lockRankAllp) |
| // lockInit(&reflectOffs.lock, lockRankReflectOffs) |
| lockInit(&finlock, lockRankFin) |
| lockInit(&trace.bufLock, lockRankTraceBuf) |
| lockInit(&trace.stringsLock, lockRankTraceStrings) |
| lockInit(&trace.lock, lockRankTrace) |
| lockInit(&cpuprof.lock, lockRankCpuprof) |
| lockInit(&trace.stackTab.lock, lockRankTraceStackTab) |
| // Enforce that this lock is always a leaf lock. |
| // All of this lock's critical sections should be |
| // extremely short. |
| lockInit(&memstats.heapStats.noPLock, lockRankLeafRank) |
| |
| _g_ := getg() |
| sched.maxmcount = 10000 |
| |
| usestackmaps = probestackmaps() |
| |
| // The world starts stopped. |
| worldStopped() |
| |
| mallocinit() |
| fastrandinit() // must run before mcommoninit |
| mcommoninit(_g_.m, -1) |
| cpuinit() // must run before alginit |
| alginit() // maps must not be used before this call |
| |
| sigsave(&_g_.m.sigmask) |
| initSigmask = _g_.m.sigmask |
| |
| goargs() |
| goenvs() |
| parsedebugvars() |
| gcinit() |
| |
| lock(&sched.lock) |
| sched.lastpoll = uint64(nanotime()) |
| procs := ncpu |
| |
| // In 32-bit mode, we can burn a lot of memory on thread stacks. |
| // Try to avoid this by limiting the number of threads we run |
| // by default. |
| if sys.PtrSize == 4 && procs > 32 { |
| procs = 32 |
| } |
| |
| if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { |
| procs = n |
| } |
| if procresize(procs) != nil { |
| throw("unknown runnable goroutine during bootstrap") |
| } |
| unlock(&sched.lock) |
| |
| // World is effectively started now, as P's can run. |
| worldStarted() |
| |
| // For cgocheck > 1, we turn on the write barrier at all times |
| // and check all pointer writes. We can't do this until after |
| // procresize because the write barrier needs a P. |
| if debug.cgocheck > 1 { |
| writeBarrier.cgo = true |
| writeBarrier.enabled = true |
| for _, p := range allp { |
| p.wbBuf.reset() |
| } |
| } |
| |
| if buildVersion == "" { |
| // Condition should never trigger. This code just serves |
| // to ensure runtime·buildVersion is kept in the resulting binary. |
| buildVersion = "unknown" |
| } |
| if len(modinfo) == 1 { |
| // Condition should never trigger. This code just serves |
| // to ensure runtime·modinfo is kept in the resulting binary. |
| modinfo = "" |
| } |
| } |
| |
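// dumpgstatus prints the status of gp and of the current g, for use in
// debugging output before a throw.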
| func dumpgstatus(gp *g) { |
| _g_ := getg() |
| print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") |
| print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") |
| } |
| |
| // sched.lock must be held. |
| func checkmcount() { |
| assertLockHeld(&sched.lock) |
| |
| if mcount() > sched.maxmcount { |
| print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") |
| throw("thread exhaustion") |
| } |
| } |
| |
| // mReserveID returns the next ID to use for a new m. This new m is immediately |
| // considered 'running' by checkdead. |
| // |
| // sched.lock must be held. |
| func mReserveID() int64 { |
| assertLockHeld(&sched.lock) |
| |
| if sched.mnext+1 < sched.mnext { |
| throw("runtime: thread ID overflow") |
| } |
| id := sched.mnext |
| sched.mnext++ |
| checkmcount() |
| return id |
| } |
| |
| // Pre-allocated ID may be passed as 'id', or omitted by passing -1. |
| func mcommoninit(mp *m, id int64) { |
| _g_ := getg() |
| |
	// g0 stack won't make sense for user (and is not necessarily unwindable).
| if _g_ != _g_.m.g0 { |
| callers(1, mp.createstack[:]) |
| } |
| |
| lock(&sched.lock) |
| |
| if id >= 0 { |
| mp.id = id |
| } else { |
| mp.id = mReserveID() |
| } |
| |
| mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed)) |
| mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed)) |
| if mp.fastrand[0]|mp.fastrand[1] == 0 { |
| mp.fastrand[1] = 1 |
| } |
| |
| mpreinit(mp) |
| |
| // Add to allm so garbage collector doesn't free g->m |
| // when it is just in a register or thread-local storage. |
| mp.alllink = allm |
| |
| // NumCgoCall() iterates over allm w/o schedlock, |
| // so we need to publish it safely. |
| atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) |
| unlock(&sched.lock) |
| } |
| |
| var fastrandseed uintptr |
| |
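// fastrandinit seeds fastrandseed with random bytes from the OS. It must run
// before mcommoninit, which uses the seed to initialize each M's fastrand state.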
| func fastrandinit() { |
| s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:] |
| getRandomData(s) |
| } |
| |
| // Mark gp ready to run. |
| func ready(gp *g, traceskip int, next bool) { |
| if trace.enabled { |
| traceGoUnpark(gp, traceskip) |
| } |
| |
| status := readgstatus(gp) |
| |
| // Mark runnable. |
| _g_ := getg() |
| mp := acquirem() // disable preemption because it can be holding p in a local var |
| if status&^_Gscan != _Gwaiting { |
| dumpgstatus(gp) |
| throw("bad g->status in ready") |
| } |
| |
| // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| runqput(_g_.m.p.ptr(), gp, next) |
| wakep() |
| releasem(mp) |
| } |
| |
| // freezeStopWait is a large value that freezetheworld sets |
| // sched.stopwait to in order to request that all Gs permanently stop. |
| const freezeStopWait = 0x7fffffff |
| |
| // freezing is set to non-zero if the runtime is trying to freeze the |
| // world. |
| var freezing uint32 |
| |
| // Similar to stopTheWorld but best-effort and can be called several times. |
| // There is no reverse operation, used during crashing. |
| // This function must not lock any mutexes. |
| func freezetheworld() { |
| atomic.Store(&freezing, 1) |
| // stopwait and preemption requests can be lost |
| // due to races with concurrently executing threads, |
| // so try several times |
| for i := 0; i < 5; i++ { |
| // this should tell the scheduler to not start any new goroutines |
| sched.stopwait = freezeStopWait |
| atomic.Store(&sched.gcwaiting, 1) |
| // this should stop running goroutines |
| if !preemptall() { |
| break // no running goroutines |
| } |
| usleep(1000) |
| } |
| // to be sure |
| usleep(1000) |
| preemptall() |
| usleep(1000) |
| } |
| |
// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, and casfrom_Gscanstatus.
| //go:nosplit |
| func readgstatus(gp *g) uint32 { |
| return atomic.Load(&gp.atomicstatus) |
| } |
| |
| // The Gscanstatuses are acting like locks and this releases them. |
| // If it proves to be a performance hit we should be able to make these |
| // simple atomic stores but for now we are going to throw if |
| // we see an inconsistent state. |
| func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { |
| success := false |
| |
| // Check that transition is valid. |
| switch oldval { |
| default: |
| print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") |
| dumpgstatus(gp) |
| throw("casfrom_Gscanstatus:top gp->status is not in scan state") |
| case _Gscanrunnable, |
| _Gscanwaiting, |
| _Gscanrunning, |
| _Gscansyscall, |
| _Gscanpreempted: |
| if newval == oldval&^_Gscan { |
| success = atomic.Cas(&gp.atomicstatus, oldval, newval) |
| } |
| } |
| if !success { |
| print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") |
| dumpgstatus(gp) |
| throw("casfrom_Gscanstatus: gp->status is not in scan state") |
| } |
| releaseLockRank(lockRankGscan) |
| } |
| |
| // This will return false if the gp is not in the expected status and the cas fails. |
| // This acts like a lock acquire while the casfromgstatus acts like a lock release. |
| func castogscanstatus(gp *g, oldval, newval uint32) bool { |
| switch oldval { |
| case _Grunnable, |
| _Grunning, |
| _Gwaiting, |
| _Gsyscall: |
| if newval == oldval|_Gscan { |
| r := atomic.Cas(&gp.atomicstatus, oldval, newval) |
| if r { |
| acquireLockRank(lockRankGscan) |
| } |
| return r |
| |
| } |
| } |
| print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") |
| throw("castogscanstatus") |
| panic("not reached") |
| } |
| |
| // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus |
| // and casfrom_Gscanstatus instead. |
| // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that |
| // put it in the Gscan state is finished. |
| //go:nosplit |
| func casgstatus(gp *g, oldval, newval uint32) { |
| if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { |
| systemstack(func() { |
| print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") |
| throw("casgstatus: bad incoming values") |
| }) |
| } |
| |
| acquireLockRank(lockRankGscan) |
| releaseLockRank(lockRankGscan) |
| |
| // See https://golang.org/cl/21503 for justification of the yield delay. |
| const yieldDelay = 5 * 1000 |
| var nextYield int64 |
| |
| // loop if gp->atomicstatus is in a scan state giving |
| // GC time to finish and change the state to oldval. |
| for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { |
| if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { |
| throw("casgstatus: waiting for Gwaiting but is Grunnable") |
| } |
| if i == 0 { |
| nextYield = nanotime() + yieldDelay |
| } |
| if nanotime() < nextYield { |
| for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { |
| procyield(1) |
| } |
| } else { |
| osyield() |
| nextYield = nanotime() + yieldDelay/2 |
| } |
| } |
| } |
| |
| // casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted. |
| // |
| // TODO(austin): This is the only status operation that both changes |
| // the status and locks the _Gscan bit. Rethink this. |
| func casGToPreemptScan(gp *g, old, new uint32) { |
| if old != _Grunning || new != _Gscan|_Gpreempted { |
| throw("bad g transition") |
| } |
| acquireLockRank(lockRankGscan) |
| for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) { |
| } |
| } |
| |
| // casGFromPreempted attempts to transition gp from _Gpreempted to |
| // _Gwaiting. If successful, the caller is responsible for |
| // re-scheduling gp. |
| func casGFromPreempted(gp *g, old, new uint32) bool { |
| if old != _Gpreempted || new != _Gwaiting { |
| throw("bad g transition") |
| } |
| return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting) |
| } |
| |
// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and recording reason as the reason
| // for the stop. On return, only the current goroutine's P is running. |
| // stopTheWorld must not be called from a system stack and the caller |
| // must not hold worldsema. The caller must call startTheWorld when |
| // other P's should resume execution. |
| // |
| // stopTheWorld is safe for multiple goroutines to call at the |
| // same time. Each will execute its own stop, and the stops will |
| // be serialized. |
| // |
| // This is also used by routines that do stack dumps. If the system is |
| // in panic or being exited, this may not reliably stop all |
| // goroutines. |
| func stopTheWorld(reason string) { |
| semacquire(&worldsema) |
| gp := getg() |
| gp.m.preemptoff = reason |
| systemstack(func() { |
| // Mark the goroutine which called stopTheWorld preemptible so its |
| // stack may be scanned. |
| // This lets a mark worker scan us while we try to stop the world |
| // since otherwise we could get in a mutual preemption deadlock. |
| // We must not modify anything on the G stack because a stack shrink |
| // may occur. A stack shrink is otherwise OK though because in order |
| // to return from this function (and to leave the system stack) we |
| // must have preempted all goroutines, including any attempting |
| // to scan our stack, in which case, any stack shrinking will |
| // have already completed by the time we exit. |
| casgstatus(gp, _Grunning, _Gwaiting) |
| stopTheWorldWithSema() |
| casgstatus(gp, _Gwaiting, _Grunning) |
| }) |
| } |
| |
| // startTheWorld undoes the effects of stopTheWorld. |
| func startTheWorld() { |
| systemstack(func() { startTheWorldWithSema(false) }) |
| |
| // worldsema must be held over startTheWorldWithSema to ensure |
| // gomaxprocs cannot change while worldsema is held. |
| // |
| // Release worldsema with direct handoff to the next waiter, but |
| // acquirem so that semrelease1 doesn't try to yield our time. |
| // |
| // Otherwise if e.g. ReadMemStats is being called in a loop, |
| // it might stomp on other attempts to stop the world, such as |
| // for starting or ending GC. The operation this blocks is |
| // so heavy-weight that we should just try to be as fair as |
| // possible here. |
| // |
| // We don't want to just allow us to get preempted between now |
| // and releasing the semaphore because then we keep everyone |
| // (including, for example, GCs) waiting longer. |
| mp := acquirem() |
| mp.preemptoff = "" |
| semrelease1(&worldsema, true, 0) |
| releasem(mp) |
| } |
| |
| // stopTheWorldGC has the same effect as stopTheWorld, but blocks |
| // until the GC is not running. It also blocks a GC from starting |
| // until startTheWorldGC is called. |
| func stopTheWorldGC(reason string) { |
| semacquire(&gcsema) |
| stopTheWorld(reason) |
| } |
| |
| // startTheWorldGC undoes the effects of stopTheWorldGC. |
| func startTheWorldGC() { |
| startTheWorld() |
| semrelease(&gcsema) |
| } |
| |
| // Holding worldsema grants an M the right to try to stop the world. |
| var worldsema uint32 = 1 |
| |
| // Holding gcsema grants the M the right to block a GC, and blocks |
| // until the current GC is done. In particular, it prevents gomaxprocs |
| // from changing concurrently. |
| // |
| // TODO(mknyszek): Once gomaxprocs and the execution tracer can handle |
| // being changed/enabled during a GC, remove this. |
| var gcsema uint32 = 1 |
| |
| // stopTheWorldWithSema is the core implementation of stopTheWorld. |
| // The caller is responsible for acquiring worldsema and disabling |
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
| // |
| // semacquire(&worldsema, 0) |
| // m.preemptoff = "reason" |
| // systemstack(stopTheWorldWithSema) |
| // |
| // When finished, the caller must either call startTheWorld or undo |
| // these three operations separately: |
| // |
| // m.preemptoff = "" |
| // systemstack(startTheWorldWithSema) |
| // semrelease(&worldsema) |
| // |
| // It is allowed to acquire worldsema once and then execute multiple |
| // startTheWorldWithSema/stopTheWorldWithSema pairs. |
| // Other P's are able to execute between successive calls to |
| // startTheWorldWithSema and stopTheWorldWithSema. |
| // Holding worldsema causes any other goroutines invoking |
| // stopTheWorld to block. |
| func stopTheWorldWithSema() { |
| _g_ := getg() |
| |
| // If we hold a lock, then we won't be able to stop another M |
| // that is blocked trying to acquire the lock. |
| if _g_.m.locks > 0 { |
| throw("stopTheWorld: holding locks") |
| } |
| |
| lock(&sched.lock) |
| sched.stopwait = gomaxprocs |
| atomic.Store(&sched.gcwaiting, 1) |
| preemptall() |
| // stop current P |
| _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. |
| sched.stopwait-- |
| // try to retake all P's in Psyscall status |
| for _, p := range allp { |
| s := p.status |
| if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { |
| if trace.enabled { |
| traceGoSysBlock(p) |
| traceProcStop(p) |
| } |
| p.syscalltick++ |
| sched.stopwait-- |
| } |
| } |
| // stop idle P's |
| for { |
| p := pidleget() |
| if p == nil { |
| break |
| } |
| p.status = _Pgcstop |
| sched.stopwait-- |
| } |
| wait := sched.stopwait > 0 |
| unlock(&sched.lock) |
| |
| // wait for remaining P's to stop voluntarily |
| if wait { |
| for { |
| // wait for 100us, then try to re-preempt in case of any races |
| if notetsleep(&sched.stopnote, 100*1000) { |
| noteclear(&sched.stopnote) |
| break |
| } |
| preemptall() |
| } |
| } |
| |
| // sanity checks |
| bad := "" |
| if sched.stopwait != 0 { |
| bad = "stopTheWorld: not stopped (stopwait != 0)" |
| } else { |
| for _, p := range allp { |
| if p.status != _Pgcstop { |
| bad = "stopTheWorld: not stopped (status != _Pgcstop)" |
| } |
| } |
| } |
| if atomic.Load(&freezing) != 0 { |
| // Some other thread is panicking. This can cause the |
| // sanity checks above to fail if the panic happens in |
| // the signal handler on a stopped thread. Either way, |
| // we should halt this thread. |
| lock(&deadlock) |
| lock(&deadlock) |
| } |
| if bad != "" { |
| throw(bad) |
| } |
| |
| worldStopped() |
| } |
| |
| func startTheWorldWithSema(emitTraceEvent bool) int64 { |
| assertWorldStopped() |
| |
| mp := acquirem() // disable preemption because it can be holding p in a local var |
| if netpollinited() { |
| list := netpoll(0) // non-blocking |
| injectglist(&list) |
| } |
| lock(&sched.lock) |
| |
| procs := gomaxprocs |
| if newprocs != 0 { |
| procs = newprocs |
| newprocs = 0 |
| } |
| p1 := procresize(procs) |
| sched.gcwaiting = 0 |
| if sched.sysmonwait != 0 { |
| sched.sysmonwait = 0 |
| notewakeup(&sched.sysmonnote) |
| } |
| unlock(&sched.lock) |
| |
| worldStarted() |
| |
| for p1 != nil { |
| p := p1 |
| p1 = p1.link.ptr() |
| if p.m != 0 { |
| mp := p.m.ptr() |
| p.m = 0 |
| if mp.nextp != 0 { |
| throw("startTheWorld: inconsistent mp->nextp") |
| } |
| mp.nextp.set(p) |
| notewakeup(&mp.park) |
| } else { |
| // Start M to run P. Do not start another M below. |
| newm(nil, p, -1) |
| } |
| } |
| |
| // Capture start-the-world time before doing clean-up tasks. |
| startTime := nanotime() |
| if emitTraceEvent { |
| traceGCSTWDone() |
| } |
| |
| // Wakeup an additional proc in case we have excessive runnable goroutines |
| // in local queues or in the global queue. If we don't, the proc will park itself. |
| // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. |
| wakep() |
| |
| releasem(mp) |
| |
| return startTime |
| } |
| |
| // First function run by a new goroutine. |
| // This is passed to makecontext. |
| func kickoff() { |
| gp := getg() |
| |
| if gp.traceback != 0 { |
| gtraceback(gp) |
| } |
| |
| fv := gp.entry |
| param := gp.param |
| |
| // When running on the g0 stack we can wind up here without a p, |
| // for example from mcall(exitsyscall0) in exitsyscall, in |
| // which case we can not run a write barrier. |
| // It is also possible for us to get here from the systemstack |
| // call in wbBufFlush, at which point the write barrier buffer |
| // is full and we can not run a write barrier. |
| // Setting gp.entry = nil or gp.param = nil will try to run a |
| // write barrier, so if we are on the g0 stack due to mcall |
| // (systemstack calls mcall) then clear the field using uintptr. |
| // This is OK when gp.param is gp.m.curg, as curg will be kept |
| // alive elsewhere, and gp.entry always points into g, or |
| // to a statically allocated value, or (in the case of mcall) |
| // to the stack. |
| if gp == gp.m.g0 && gp.param == unsafe.Pointer(gp.m.curg) { |
| *(*uintptr)(unsafe.Pointer(&gp.entry)) = 0 |
| *(*uintptr)(unsafe.Pointer(&gp.param)) = 0 |
| } else if gp.m.p == 0 { |
| throw("no p in kickoff") |
| } else { |
| gp.entry = nil |
| gp.param = nil |
| } |
| |
| // Record the entry SP to help stack scan. |
| gp.entrysp = getsp() |
| |
| fv(param) |
| goexit1() |
| } |
| |
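// mstart1 prepares the current M to execute Go code and enters the scheduler.
// It must be called on the M's g0. For M's other than m0 it acquires the P
// stored in m.nextp before calling schedule.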
| func mstart1() { |
| _g_ := getg() |
| |
| if _g_ != _g_.m.g0 { |
| throw("bad runtime·mstart") |
| } |
| |
| asminit() |
| |
| // Install signal handlers; after minit so that minit can |
| // prepare the thread to be able to handle the signals. |
| // For gccgo minit was called by C code. |
| if _g_.m == &m0 { |
| mstartm0() |
| } |
| |
| if fn := _g_.m.mstartfn; fn != nil { |
| fn() |
| } |
| |
| if _g_.m != &m0 { |
| acquirep(_g_.m.nextp.ptr()) |
| _g_.m.nextp = 0 |
| } |
| schedule() |
| } |
| |
| // mstartm0 implements part of mstart1 that only runs on the m0. |
| // |
| // Write barriers are allowed here because we know the GC can't be |
| // running yet, so they'll be no-ops. |
| // |
| //go:yeswritebarrierrec |
| func mstartm0() { |
| // Create an extra M for callbacks on threads not created by Go. |
| // An extra M is also needed on Windows for callbacks created by |
| // syscall.NewCallback. See issue #6751 for details. |
| if (iscgo || GOOS == "windows") && !cgoHasExtraM { |
| cgoHasExtraM = true |
| newextram() |
| } |
| initsig(false) |
| } |
| |
| // mPark causes a thread to park itself - temporarily waking for |
| // fixups but otherwise waiting to be fully woken. This is the |
| // only way that m's should park themselves. |
| //go:nosplit |
| func mPark() { |
| g := getg() |
| for { |
| notesleep(&g.m.park) |
| noteclear(&g.m.park) |
| if !mDoFixup() { |
| return |
| } |
| } |
| } |
| |
| // mexit tears down and exits the current thread. |
| // |
| // Don't call this directly to exit the thread, since it must run at |
| // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to |
| // unwind the stack to the point that exits the thread. |
| // |
| // It is entered with m.p != nil, so write barriers are allowed. It |
| // will release the P before exiting. |
| // |
| //go:yeswritebarrierrec |
| func mexit(osStack bool) { |
| g := getg() |
| m := g.m |
| |
| if m == &m0 { |
| // This is the main thread. Just wedge it. |
| // |
| // On Linux, exiting the main thread puts the process |
| // into a non-waitable zombie state. On Plan 9, |
| // exiting the main thread unblocks wait even though |
| // other threads are still running. On Solaris we can |
| // neither exitThread nor return from mstart. Other |
| // bad things probably happen on other platforms. |
| // |
| // We could try to clean up this M more before wedging |
| // it, but that complicates signal handling. |
| handoffp(releasep()) |
| lock(&sched.lock) |
| sched.nmfreed++ |
| checkdead() |
| unlock(&sched.lock) |
| mPark() |
| throw("locked m0 woke up") |
| } |
| |
| sigblock(true) |
| unminit() |
| |
| // Free the gsignal stack. |
| if m.gsignal != nil { |
| stackfree(m.gsignal) |
| // On some platforms, when calling into VDSO (e.g. nanotime) |
| // we store our g on the gsignal stack, if there is one. |
| // Now the stack is freed, unlink it from the m, so we |
| // won't write to it when calling VDSO code. |
| m.gsignal = nil |
| } |
| |
| // Remove m from allm. |
| lock(&sched.lock) |
| for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { |
| if *pprev == m { |
| *pprev = m.alllink |
| goto found |
| } |
| } |
| throw("m not found in allm") |
| found: |
| if !osStack { |
| // Delay reaping m until it's done with the stack. |
| // |
| // If this is using an OS stack, the OS will free it |
| // so there's no need for reaping. |
| atomic.Store(&m.freeWait, 1) |
| // Put m on the free list, though it will not be reaped until |
| // freeWait is 0. Note that the free list must not be linked |
| // through alllink because some functions walk allm without |
| // locking, so may be using alllink. |
| m.freelink = sched.freem |
| sched.freem = m |
| } |
| unlock(&sched.lock) |
| |
| // Release the P. |
| handoffp(releasep()) |
| // After this point we must not have write barriers. |
| |
| // Invoke the deadlock detector. This must happen after |
| // handoffp because it may have started a new M to take our |
| // P's work. |
| lock(&sched.lock) |
| sched.nmfreed++ |
| checkdead() |
| unlock(&sched.lock) |
| |
| if GOOS == "darwin" || GOOS == "ios" { |
| // Make sure pendingPreemptSignals is correct when an M exits. |
| // For #41702. |
| if atomic.Load(&m.signalPending) != 0 { |
| atomic.Xadd(&pendingPreemptSignals, -1) |
| } |
| } |
| |
| // Destroy all allocated resources. After this is called, we may no |
| // longer take any locks. |
| mdestroy(m) |
| |
| if osStack { |
| // Return from mstart and let the system thread |
| // library free the g0 stack and terminate the thread. |
| return |
| } |
| |
| // mstart is the thread's entry point, so there's nothing to |
| // return to. Exit the thread directly. exitThread will clear |
| // m.freeWait when it's done with the stack and the m can be |
| // reaped. |
| exitThread(&m.freeWait) |
| } |
| |
| // forEachP calls fn(p) for every P p when p reaches a GC safe point. |
| // If a P is currently executing code, this will bring the P to a GC |
| // safe point and execute fn on that P. If the P is not executing code |
| // (it is idle or in a syscall), this will call fn(p) directly while |
| // preventing the P from exiting its state. This does not ensure that |
| // fn will run on every CPU executing Go code, but it acts as a global |
| // memory barrier. GC uses this as a "ragged barrier." |
| // |
| // The caller must hold worldsema. |
| // |
| //go:systemstack |
| func forEachP(fn func(*p)) { |
| mp := acquirem() |
| _p_ := getg().m.p.ptr() |
| |
| lock(&sched.lock) |
| if sched.safePointWait != 0 { |
| throw("forEachP: sched.safePointWait != 0") |
| } |
| sched.safePointWait = gomaxprocs - 1 |
| sched.safePointFn = fn |
| |
| // Ask all Ps to run the safe point function. |
| for _, p := range allp { |
| if p != _p_ { |
| atomic.Store(&p.runSafePointFn, 1) |
| } |
| } |
| preemptall() |
| |
| // Any P entering _Pidle or _Psyscall from now on will observe |
| // p.runSafePointFn == 1 and will call runSafePointFn when |
| // changing its status to _Pidle/_Psyscall. |
| |
| // Run safe point function for all idle Ps. sched.pidle will |
| // not change because we hold sched.lock. |
| for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { |
| if atomic.Cas(&p.runSafePointFn, 1, 0) { |
| fn(p) |
| sched.safePointWait-- |
| } |
| } |
| |
| wait := sched.safePointWait > 0 |
| unlock(&sched.lock) |
| |
| // Run fn for the current P. |
| fn(_p_) |
| |
| // Force Ps currently in _Psyscall into _Pidle and hand them |
| // off to induce safe point function execution. |
| for _, p := range allp { |
| s := p.status |
| if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { |
| if trace.enabled { |
| traceGoSysBlock(p) |
| traceProcStop(p) |
| } |
| p.syscalltick++ |
| handoffp(p) |
| } |
| } |
| |
| // Wait for remaining Ps to run fn. |
| if wait { |
| for { |
| // Wait for 100us, then try to re-preempt in |
| // case of any races. |
| // |
| // Requires system stack. |
| if notetsleep(&sched.safePointNote, 100*1000) { |
| noteclear(&sched.safePointNote) |
| break |
| } |
| preemptall() |
| } |
| } |
| if sched.safePointWait != 0 { |
| throw("forEachP: not done") |
| } |
| for _, p := range allp { |
| if p.runSafePointFn != 0 { |
| throw("forEachP: P did not run fn") |
| } |
| } |
| |
| lock(&sched.lock) |
| sched.safePointFn = nil |
| unlock(&sched.lock) |
| releasem(mp) |
| } |
| |
| // runSafePointFn runs the safe point function, if any, for this P. |
| // This should be called like |
| // |
| // if getg().m.p.runSafePointFn != 0 { |
| // runSafePointFn() |
| // } |
| // |
| // runSafePointFn must be checked on any transition in to _Pidle or |
| // _Psyscall to avoid a race where forEachP sees that the P is running |
| // just before the P goes into _Pidle/_Psyscall and neither forEachP |
| // nor the P run the safe-point function. |
| func runSafePointFn() { |
| p := getg().m.p.ptr() |
| // Resolve the race between forEachP running the safe-point |
| // function on this P's behalf and this P running the |
| // safe-point function directly. |
| if !atomic.Cas(&p.runSafePointFn, 1, 0) { |
| return |
| } |
| sched.safePointFn(p) |
| lock(&sched.lock) |
| sched.safePointWait-- |
| if sched.safePointWait == 0 { |
| notewakeup(&sched.safePointNote) |
| } |
| unlock(&sched.lock) |
| } |
| |
| // Allocate a new m unassociated with any thread. |
| // Can use p for allocation context if needed. |
| // fn is recorded as the new m's m.mstartfn. |
| // id is optional pre-allocated m ID. Omit by passing -1. |
| // |
| // This function is allowed to have write barriers even if the caller |
| // isn't because it borrows _p_. |
| // |
| //go:yeswritebarrierrec |
| func allocm(_p_ *p, fn func(), id int64, allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) { |
| _g_ := getg() |
| acquirem() // disable GC because it can be called from sysmon |
| if _g_.m.p == 0 { |
| acquirep(_p_) // temporarily borrow p for mallocs in this function |
| } |
| |
| // Release the free M list. We need to do this somewhere and |
| // this may free up a stack we can use. |
| if sched.freem != nil { |
| lock(&sched.lock) |
| var newList *m |
| for freem := sched.freem; freem != nil; { |
| if freem.freeWait != 0 { |
| next := freem.freelink |
| freem.freelink = newList |
| newList = freem |
| freem = next |
| continue |
| } |
| // stackfree must be on the system stack, but allocm is |
| // reachable off the system stack transitively from |
| // startm. |
| systemstack(func() { |
| stackfree(freem.g0) |
| }) |
| freem = freem.freelink |
| } |
| sched.freem = newList |
| unlock(&sched.lock) |
| } |
| |
| mp = new(m) |
| mp.mstartfn = fn |
| mcommoninit(mp, id) |
| |
| mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize) |
| mp.g0.m = mp |
| |
| if _p_ == _g_.m.p.ptr() { |
| releasep() |
| } |
| releasem(_g_.m) |
| |
| return mp, g0Stack, g0StackSize |
| } |
| |
| // needm is called when a cgo callback happens on a |
| // thread without an m (a thread not created by Go). |
| // In this case, needm is expected to find an m to use |
| // and return with m, g initialized correctly. |
| // Since m and g are not set now (likely nil, but see below) |
| // needm is limited in what routines it can call. In particular |
| // it can only call nosplit functions (textflag 7) and cannot |
| // do any scheduling that requires an m. |
| // |
| // In order to avoid needing heavy lifting here, we adopt |
| // the following strategy: there is a stack of available m's |
| // that can be stolen. Using compare-and-swap |
| // to pop from the stack has ABA races, so we simulate |
| // a lock by doing an exchange (via Casuintptr) to steal the stack |
| // head and replace the top pointer with MLOCKED (1). |
| // This serves as a simple spin lock that we can use even |
| // without an m. The thread that locks the stack in this way |
| // unlocks the stack by storing a valid stack head pointer. |
| // |
| // In order to make sure that there is always an m structure |
| // available to be stolen, we maintain the invariant that there |
| // is always one more than needed. At the beginning of the |
| // program (if cgo is in use) the list is seeded with a single m. |
| // If needm finds that it has taken the last m off the list, its job |
| // is - once it has installed its own m so that it can do things like |
| // allocate memory - to create a spare m and put it on the list. |
| // |
| // Each of these extra m's also has a g0 and a curg that are |
| // pressed into service as the scheduling stack and current |
| // goroutine for the duration of the cgo callback. |
| // |
| // When the callback is done with the m, it calls dropm to |
| // put the m back on the list. |
| //go:nosplit |
| func needm() { |
| if (iscgo || GOOS == "windows") && !cgoHasExtraM { |
| // Can happen if C/C++ code calls Go from a global ctor. |
| // Can also happen on Windows if a global ctor uses a |
| // callback created by syscall.NewCallback. See issue #6751 |
| // for details. |
| // |
| // Can not throw, because scheduler is not initialized yet. |
| write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) |
| exit(1) |
| } |
| |
| // Save and block signals before getting an M. |
| // The signal handler may call needm itself, |
| // and we must avoid a deadlock. Also, once g is installed, |
| // any incoming signals will try to execute, |
| // but we won't have the sigaltstack settings and other data |
| // set up appropriately until the end of minit, which will |
| // unblock the signals. This is the same dance as when |
| // starting a new m to run Go code via newosproc. |
| var sigmask sigset |
| sigsave(&sigmask) |
| sigblock(false) |
| |
| // Lock extra list, take head, unlock popped list. |
| // nilokay=false is safe here because of the invariant above, |
| // that the extra list always contains or will soon contain |
| // at least one m. |
| mp := lockextra(false) |
| |
| // Set needextram when we've just emptied the list, |
| // so that the eventual call into cgocallbackg will |
| // allocate a new m for the extra list. We delay the |
| // allocation until then so that it can be done |
| // after exitsyscall makes sure it is okay to be |
| // running at all (that is, there's no garbage collection |
| // running right now). |
| mp.needextram = mp.schedlink == 0 |
| extraMCount-- |
| unlockextra(mp.schedlink.ptr()) |
| |
| // Store the original signal mask for use by minit. |
| mp.sigmask = sigmask |
| |
| // Install g (= m->curg). |
| setg(mp.curg) |
| |
| // Initialize this thread to use the m. |
| asminit() |
| minit() |
| |
| setGContext() |
| |
| // mp.curg is now a real goroutine. |
| casgstatus(mp.curg, _Gdead, _Gsyscall) |
| atomic.Xadd(&sched.ngsys, -1) |
| } |
| |
| var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") |
| |
| // newextram allocates m's and puts them on the extra list. |
| // It is called with a working local m, so that it can do things |
| // like call schedlock and allocate. |
| func newextram() { |
| c := atomic.Xchg(&extraMWaiters, 0) |
| if c > 0 { |
| for i := uint32(0); i < c; i++ { |
| oneNewExtraM() |
| } |
| } else { |
| // Make sure there is at least one extra M. |
| mp := lockextra(true) |
| unlockextra(mp) |
| if mp == nil { |
| oneNewExtraM() |
| } |
| } |
| } |
| |
| // oneNewExtraM allocates an m and puts it on the extra list. |
| func oneNewExtraM() { |
| // Create extra goroutine locked to extra m. |
| // The goroutine is the context in which the cgo callback will run. |
| // The sched.pc will never be returned to, but setting it to |
| // goexit makes clear to the traceback routines where |
| // the goroutine stack ends. |
| mp, g0SP, g0SPSize := allocm(nil, nil, -1, true) |
| gp := malg(true, false, nil, nil) |
| // malg returns status as _Gidle. Change to _Gdead before |
| // adding to allg where GC can see it. We use _Gdead to hide |
| // this from tracebacks and stack scans since it isn't a |
| // "real" goroutine until needm grabs it. |
| casgstatus(gp, _Gidle, _Gdead) |
| gp.m = mp |
| mp.curg = gp |
| mp.lockedInt++ |
| mp.lockedg.set(gp) |
| gp.lockedm.set(mp) |
| gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) |
| // put on allg for garbage collector |
| allgadd(gp) |
| |
| // The context for gp will be set up in needm. |
| // Here we need to set the context for g0. |
| makeGContext(mp.g0, g0SP, g0SPSize) |
| |
| // gp is now on the allg list, but we don't want it to be |
| // counted by gcount. It would be more "proper" to increment |
| // sched.ngfree, but that requires locking. Incrementing ngsys |
| // has the same effect. |
| atomic.Xadd(&sched.ngsys, +1) |
| |
| // Add m to the extra list. |
| mnext := lockextra(true) |
| mp.schedlink.set(mnext) |
| extraMCount++ |
| unlockextra(mp) |
| } |
| |
| // dropm is called when a cgo callback has called needm but is now |
| // done with the callback and returning back into the non-Go thread. |
| // It puts the current m back onto the extra list. |
| // |
| // The main expense here is the call to signalstack to release the |
| // m's signal stack, and then the call to needm on the next callback |
| // from this thread. It is tempting to try to save the m for next time, |
| // which would eliminate both these costs, but there might not be |
| // a next time: the current thread (which Go does not control) might exit. |
| // If we saved the m for that thread, there would be an m leak each time |
| // such a thread exited. Instead, we acquire and release an m on each |
| // call. These should typically not be scheduling operations, just a few |
| // atomics, so the cost should be small. |
| // |
| // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread |
| // variable using pthread_key_create. Unlike the pthread keys we already use |
| // on OS X, this dummy key would never be read by Go code. It would exist |
| // only so that we could register a thread-exit-time destructor. |
| // That destructor would put the m back onto the extra list. |
| // This is purely a performance optimization. The current version, |
| // in which dropm happens on each cgo call, is still correct too. |
| // We may have to keep the current version on systems with cgo |
| // but without pthreads, like Windows. |
| // |
| // CgocallBackDone calls this after releasing p, so no write barriers. |
| //go:nowritebarrierrec |
| func dropm() { |
| // Clear m and g, and return m to the extra list. |
| // After the call to setg we can only call nosplit functions |
| // with no pointer manipulation. |
| mp := getg().m |
| |
| // Return mp.curg to dead state. |
| casgstatus(mp.curg, _Gsyscall, _Gdead) |
| mp.curg.preemptStop = false |
| atomic.Xadd(&sched.ngsys, +1) |
| |
| // Block signals before unminit. |
| // Unminit unregisters the signal handling stack (but needs g on some systems). |
| // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. |
| // It's important not to try to handle a signal between those two steps. |
| sigmask := mp.sigmask |
| sigblock(false) |
| unminit() |
| |
| // gccgo sets the stack to Gdead here, because the splitstack |
| // context is not initialized. |
| atomic.Store(&mp.curg.atomicstatus, _Gdead) |
| mp.curg.gcstack = 0 |
| mp.curg.gcnextsp = 0 |
| |
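| // Push this m back onto the head of the extra list. |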
| mnext := lockextra(true) |
| extraMCount++ |
| mp.schedlink.set(mnext) |
| |
| setg(nil) |
| |
| // Commit the release of mp. |
| unlockextra(mp) |
| |
| msigrestore(sigmask) |
| } |
| |
| // A helper function for EnsureDropM. |
| func getm() uintptr { |
| return uintptr(unsafe.Pointer(getg().m)) |
| } |
| |
| var extram uintptr |
| var extraMCount uint32 // Protected by lockextra |
| var extraMWaiters uint32 |
| |
| // lockextra locks the extra list and returns the list head. |
| // The caller must unlock the list by storing a new list head |
| // to extram. If nilokay is true, then lockextra will |
| // return a nil list head if that's what it finds. If nilokay is false, |
| // lockextra will keep waiting until the list head is no longer nil. |
| //go:nosplit |
| //go:nowritebarrierrec |
| func lockextra(nilokay bool) *m { |
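| // extram doubles as a spin lock: the sentinel value 1 marks the |
| // list as locked; any other value is the current list head |
| // (possibly nil). |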
| const locked = 1 |
| |
| incr := false |
| for { |
| old := atomic.Loaduintptr(&extram) |
| if old == locked { |
| osyield() |
| continue |
| } |
| if old == 0 && !nilokay { |
| if !incr { |
| // Add 1 to the number of threads |
| // waiting for an M. |
| // This is cleared by newextram. |
| atomic.Xadd(&extraMWaiters, 1) |
| incr = true |
| } |
| usleep(1) |
| continue |
| } |
| if atomic.Casuintptr(&extram, old, locked) { |
| return (*m)(unsafe.Pointer(old)) |
| } |
| osyield() |
| continue |
| } |
| } |
| |
| //go:nosplit |
| //go:nowritebarrierrec |
| func unlockextra(mp *m) { |
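| // Storing the new list head publishes mp and releases the lock |
| // taken by lockextra. |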
| atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) |
| } |
| |
| // execLock serializes exec and clone to avoid bugs or unspecified behaviour |
| // around exec'ing while creating/destroying threads. See issue #19546. |
| var execLock rwmutex |
| |
| // newmHandoff contains a list of m structures that need new OS threads. |
| // This is used by newm in situations where newm itself can't safely |
| // start an OS thread. |
| var newmHandoff struct { |
| lock mutex |
| |
| // newm points to a list of M structures that need new OS |
| // threads. The list is linked through m.schedlink. |
| newm muintptr |
| |
| // waiting indicates that wake needs to be notified when an m |
| // is put on the list. |
| waiting bool |
| wake note |
| |
| // haveTemplateThread indicates that the templateThread has |
| // been started. This is not protected by lock. Use cas to set |
| // to 1. |
| haveTemplateThread uint32 |
| } |
| |
| // Create a new m. It will start off with a call to fn, or else the scheduler. |
| // fn needs to be static and not a heap allocated closure. |
| // May run with m.p==nil, so write barriers are not allowed. |
| // |
| // id is optional pre-allocated m ID. Omit by passing -1. |
| //go:nowritebarrierrec |
| func newm(fn func(), _p_ *p, id int64) { |
| mp, _, _ := allocm(_p_, fn, id, false) |
| mp.doesPark = (_p_ != nil) |
| mp.nextp.set(_p_) |
| mp.sigmask = initSigmask |
| if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { |
| // We're on a locked M or a thread that may have been |
| // started by C. The kernel state of this thread may |
| // be strange (the user may have locked it for that |
| // purpose). We don't want to clone that into another |
| // thread. Instead, ask a known-good thread to create |
| // the thread for us. |
| // |
| // This is disabled on Plan 9. See golang.org/issue/22227. |
| // |
| // TODO: This may be unnecessary on Windows, which |
| // doesn't model thread creation off fork. |
| lock(&newmHandoff.lock) |
| if newmHandoff.haveTemplateThread == 0 { |
| throw("on a locked thread with no template thread") |
| } |
| mp.schedlink = newmHandoff.newm |
| newmHandoff.newm.set(mp) |
| if newmHandoff.waiting { |
| newmHandoff.waiting = false |
| notewakeup(&newmHandoff.wake) |
| } |
| unlock(&newmHandoff.lock) |
| return |
| } |
| newm1(mp) |
| } |
| |
| func newm1(mp *m) { |
| execLock.rlock() // Prevent process clone. |
| newosproc(mp) |
| execLock.runlock() |
| } |
| |
| // startTemplateThread starts the template thread if it is not already |
| // running. |
| // |
| // The calling thread must itself be in a known-good state. |
| func startTemplateThread() { |
| if GOARCH == "wasm" { // no threads on wasm yet |
| return |
| } |
| |
| // Disable preemption to guarantee that the template thread will be |
| // created before a park once haveTemplateThread is set. |
| mp := acquirem() |
| if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { |
| releasem(mp) |
| return |
| } |
| newm(templateThread, nil, -1) |
| releasem(mp) |
| } |
| |
| // mFixupRace is used to temporarily borrow the race context from the |
| // coordinating m during a syscall_runtime_doAllThreadsSyscall and |
| // loan it out to each of the m's of the runtime so they can execute a |
| // mFixup.fn in that context. |
| var mFixupRace struct { |
| lock mutex |
| ctx uintptr |
| } |
| |
| // mDoFixup runs any outstanding fixup function for the running m. |
| // Returns true if a fixup was outstanding and actually executed. |
| // |
| //go:nosplit |
| func mDoFixup() bool { |
| _g_ := getg() |
| lock(&_g_.m.mFixup.lock) |
| fn := _g_.m.mFixup.fn |
| if fn != nil { |
| if gcphase != _GCoff { |
| // We can't have a write barrier in this |
| // context since we may not have a P, but we |
| // clear fn to signal that we've executed the |
| // fixup. As long as fn is kept alive |
| // elsewhere, technically we should have no |
| // issues with the GC, but fn is likely |
| // generated in a different package altogether |
| // that may change independently. Just assert |
| // the GC is off so this lack of write barrier |
| // is more obviously safe. |
| throw("GC must be disabled to protect validity of fn value") |
| } |
| *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0 |
| fn(false) |
| } |
| unlock(&_g_.m.mFixup.lock) |
| return fn != nil |
| } |
| |
| // templateThread is a thread in a known-good state that exists solely |
| // to start new threads in known-good states when the calling thread |
| // may not be in a good state. |
| // |
| // Many programs never need this, so templateThread is started lazily |
| // when we first enter a state that might lead to running on a thread |
| // in an unknown state. |
| // |
| // templateThread runs on an M without a P, so it must not have write |
| // barriers. |
| // |
| //go:nowritebarrierrec |
| func templateThread() { |
| lock(&sched.lock) |
| sched.nmsys++ |
| checkdead() |
| unlock(&sched.lock) |
| |
| for { |
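| // Drain any m's queued on newmHandoff and create their OS threads |
| // from this known-good thread, then sleep until more are handed off. |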
| lock(&newmHandoff.lock) |
| for newmHandoff.newm != 0 { |
| newm := newmHandoff.newm.ptr() |
| newmHandoff.newm = 0 |
| unlock(&newmHandoff.lock) |
| for newm != nil { |
| next := newm.schedlink.ptr() |
| newm.schedlink = 0 |
| newm1(newm) |
| newm = next |
| } |
| lock(&newmHandoff.lock) |
| } |
| newmHandoff.waiting = true |
| noteclear(&newmHandoff.wake) |
| unlock(&newmHandoff.lock) |
| notesleep(&newmHandoff.wake) |
| mDoFixup() |
| } |
| } |
| |
| // Stops execution of the current m until new work is available. |
| // Returns with acquired P. |
| func stopm() { |
| _g_ := getg() |
| |
| if _g_.m.locks != 0 { |
| throw("stopm holding locks") |
| } |
| if _g_.m.p != 0 { |
| throw("stopm holding p") |
| } |
| if _g_.m.spinning { |
| throw("stopm spinning") |
| } |
| |
| lock(&sched.lock) |
| mput(_g_.m) |
| unlock(&sched.lock) |
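| // Park until some other thread hands this M a P (via m.nextp) and |
| // wakes it. |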
| mPark() |
| acquirep(_g_.m.nextp.ptr()) |
| _g_.m.nextp = 0 |
| } |
| |
| func mspinning() { |
| // startm's caller incremented nmspinning. Set the new M's spinning. |
| getg().m.spinning = true |
| } |
| |
| // Schedules some M to run the p (creates an M if necessary). |
| // If p==nil, tries to get an idle P; if there are no idle P's, it does nothing. |
| // May run with m.p==nil, so write barriers are not allowed. |
| // If spinning is set, the caller has incremented nmspinning and startm will |
| // either decrement nmspinning or set m.spinning in the newly started M. |
| // |
| // Callers passing a non-nil P must call from a non-preemptible context. See |
| // comment on acquirem below. |
| // |
| // Must not have write barriers because this may be called without a P. |
| //go:nowritebarrierrec |
| func startm(_p_ *p, spinning bool) { |
| // Disable preemption. |
| // |
| // Every owned P must have an owner that will eventually stop it in the |
| // event of a GC stop request. startm takes transient ownership of a P |
| // (either from argument or pidleget below) and transfers ownership to |
| // a started M, which will be responsible for performing the stop. |
| // |
| // Preemption must be disabled during this transient ownership, |
| // otherwise the P this is running on may enter GC stop while still |
| // holding the transient P, leaving that P in limbo and deadlocking the |
| // STW. |
| // |
| // Callers passing a non-nil P must already be in non-preemptible |
| // context, otherwise such preemption could occur on function entry to |
| // startm. Callers passing a nil P may be preemptible, so we must |
| // disable preemption before acquiring a P from pidleget below. |
| mp := acquirem() |
| lock(&sched.lock) |
| if _p_ == nil { |
| _p_ = pidleget() |
| if _p_ == nil { |
| unlock(&sched.lock) |
| if spinning { |
| // The caller incremented nmspinning, but there are no idle Ps, |
| // so it's okay to just undo the increment and give up. |
| if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { |
| throw("startm: negative nmspinning") |
| } |
| } |
| releasem(mp) |
| return |
| } |
| } |
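| // Try to reuse an M from the idle list before creating a new one. |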
| nmp := mget() |
| if nmp == nil { |
| // No M is available, we must drop sched.lock and call newm. |
| // However, we already own a P to assign to the M. |
| // |
| // Once sched.lock is released, another G (e.g., in a syscall) |
| // could find no idle P while checkdead finds a runnable G but |
| // no running M's because this new M hasn't started yet, thus |
| // throwing in an apparent deadlock. |
| // |
| // Avoid this situation by pre-allocating the ID for the new M, |
| // thus marking it as 'running' before we drop sched.lock. This |
| // new M will eventually run the scheduler to execute any |
| // queued G's. |
| id := mReserveID() |
| unlock(&sched.lock) |
| |
| var fn func() |
| if spinning { |
| // The caller incremented nmspinning, so set m.spinning in the new M. |
| fn = mspinning |
| } |
| newm(fn, _p_, id) |
| // Ownership transfer of _p_ committed by start in newm. |
| // Preemption is now safe. |
| releasem(mp) |
| return |
| } |
| unlock(&sched.lock) |
| if nmp.spinning { |
| throw("startm: m is spinning") |
| } |
| if nmp.nextp != 0 { |
| throw("startm: m has p") |
| } |
| if spinning && !runqempty(_p_) { |
| throw("startm: p has runnable gs") |
| } |
| // The caller incremented nmspinning, so set m.spinning in the new M. |
| nmp.spinning = spinning |
| nmp.nextp.set(_p_) |
| notewakeup(&nmp.park) |
| // Ownership transfer of _p_ committed by wakeup. Preemption is now |
| // safe. |
| releasem(mp) |
| } |
| |
| // Hands off P from syscall or locked M. |
| // Always runs without a P, so write barriers are not allowed. |
| //go:nowritebarrierrec |
| func handoffp(_p_ *p) { |
| // handoffp must start an M in any situation where |
| // findrunnable would return a G to run on _p_. |
| |
| // if it has local work, start it straight away |
| if !runqempty(_p_) || sched.runqsize != 0 { |
| startm(_p_, false) |
| return |
| } |
| // if it has GC work, start it straight away |
| if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { |
| startm(_p_, false) |
| return |
| } |
| // no local work, check that there are no spinning/idle M's, |
| // otherwise our help is not required |
| if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic |
| startm(_p_, true) |
| return |
| } |
| lock(&sched.lock) |
| if sched.gcwaiting != 0 { |
| _p_.status = _Pgcstop |
| sched.stopwait-- |
| if sched.stopwait == 0 { |
| notewakeup(&sched.stopnote) |
| } |
| unlock(&sched.lock) |
| return |
| } |
| if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { |
| sched.safePointFn(_p_) |
| sched.safePointWait-- |
| if sched.safePointWait == 0 { |
| notewakeup(&sched.safePointNote) |
| } |
| } |
| if sched.runqsize != 0 { |
| unlock(&sched.lock) |
| startm(_p_, false) |
| return |
| } |
| // If this is the last running P and nobody is polling the network, |
| // we need to wake up another M to poll the network. |
| if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { |
| unlock(&sched.lock) |
| startm(_p_, false) |
| return |
| } |
| |
| // The scheduler lock cannot be held when calling wakeNetPoller below |
| // because wakeNetPoller may call wakep which may call startm. |
| when := nobarrierWakeTime(_p_) |
| pidleput(_p_) |
| unlock(&sched.lock) |
| |
| if when != 0 { |
| wakeNetPoller(when) |
| } |
| } |
| |
| // Tries to add one more P to execute G's. |
| // Called when a G is made runnable (newproc, ready). |
| func wakep() { |
| if atomic.Load(&sched.npidle) == 0 { |
| return |
| } |
| // be conservative about spinning threads |
| if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) { |
| return |
| } |
| startm(nil, true) |
| } |
| |
| // Stops execution of the current m that is locked to a g until the g is runnable again. |
| // Returns with acquired P. |
| func stoplockedm() { |
| _g_ := getg() |
| |
| if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { |
| throw("stoplockedm: inconsistent locking") |
| } |
| if _g_.m.p != 0 { |
| // Schedule another M to run this p. |
| _p_ := releasep() |
| handoffp(_p_) |
| } |
| incidlelocked(1) |
| // Wait until another thread schedules lockedg again. |
| mPark() |
| status := readgstatus(_g_.m.lockedg.ptr()) |
| if status&^_Gscan != _Grunnable { |
| print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n") |
| dumpgstatus(_g_.m.lockedg.ptr()) |
| throw("stoplockedm: not runnable") |
| } |
| acquirep(_g_.m.nextp.ptr()) |
| _g_.m.nextp = 0 |
| } |
| |
| // Schedules the locked m to run the locked gp. |
| // May run during STW, so write barriers are not allowed. |
| //go:nowritebarrierrec |
| func startlockedm(gp *g) { |
| _g_ := getg() |
| |
| mp := gp.lockedm.ptr() |
| if mp == _g_.m { |
| throw("startlockedm: locked to me") |
| } |
| if mp.nextp != 0 { |
| throw("startlockedm: m has p") |
| } |
| // directly handoff current P to the locked m |
| incidlelocked(-1) |
| _p_ := releasep() |
| mp.nextp.set(_p_) |
| notewakeup(&mp.park) |
| stopm() |
| } |
| |
| // Stops the current m for stopTheWorld. |
| // Returns when the world is restarted. |
| func gcstopm() { |
| _g_ := getg() |
| |
| if sched.gcwaiting == 0 { |
| throw("gcstopm: not waiting for gc") |
| } |
| if _g_.m.spinning { |
| _g_.m.spinning = false |
| // OK to just drop nmspinning here, |
| // startTheWorld will unpark threads as necessary. |
| if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { |
| throw("gcstopm: negative nmspinning") |
| } |
| } |
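| // Release our P and mark it stopped for the GC. The last P to stop |
| // wakes the initiator of the stop-the-world via stopnote. |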
| _p_ := releasep() |
| lock(&sched.lock) |
| _p_.status = _Pgcstop |
| sched.stopwait-- |
| if sched.stopwait == 0 { |
| notewakeup(&sched.stopnote) |
| } |
| unlock(&sched.lock) |
| stopm() |
| } |
| |
| // Schedules gp to run on the current M. |
| // If inheritTime is true, gp inherits the remaining time in the |
| // current time slice. Otherwise, it starts a new time slice. |
| // Never returns. |
| // |
| // Write barriers are allowed because this is called immediately after |
| // acquiring a P in several places. |
| // |
| //go:yeswritebarrierrec |
| func execute(gp *g, inheritTime bool) { |
| _g_ := getg() |
| |
| // Assign gp.m before entering _Grunning so running Gs have an |
| // M. |
| _g_.m.curg = gp |
| gp.m = _g_.m |
| casgstatus(gp, _Grunnable, _Grunning) |
| gp.waitsince = 0 |
| gp.preempt = false |
| if !inheritTime { |
| _g_.m.p.ptr().schedtick++ |
| } |
| |
| // Check whether the profiler needs to be turned on or off. |
| hz := sched.profilehz |
| if _g_.m.profilehz != hz { |
| setThreadCPUProfiler(hz) |
| } |
| |
| if trace.enabled { |
| // GoSysExit has to happen when we have a P, but before GoStart. |
| // So we emit it here. |
| if gp.syscallsp != 0 && gp.sysblocktraced { |
| traceGoSysExit(gp.sysexitticks) |
| } |
| traceGoStart() |
| } |
| |
| gogo(gp) |
| } |
| |
| // Finds a runnable goroutine to execute. |
| // Tries to steal from other P's, get g from local or global queue, poll network. |
| func findrunnable() (gp *g, inheritTime bool) { |
| _g_ := getg() |
| |
| // The conditions here and in handoffp must agree: if |
| // findrunnable would return a G to run, handoffp must start |
| // an M. |
| |
| top: |
| _p_ := _g_.m.p.ptr() |
| if sched.gcwaiting != 0 { |
| gcstopm() |
| goto top |
| } |
| if _p_.runSafePointFn != 0 { |
| runSafePointFn() |
| } |
| |
| now, pollUntil, _ := checkTimers(_p_, 0) |
| |
| if fingwait && fingwake { |
| if gp := wakefing(); gp != nil { |
| ready(gp, 0, true) |
| } |
| } |
| if *cgo_yield != nil { |
| asmcgocall(*cgo_yield, nil) |
| } |
| |
| // local runq |
| if gp, inheritTime := runqget(_p_); gp != nil { |
| return gp, inheritTime |
| } |
| |
| // global runq |
| if sched.runqsize != 0 { |
| lock(&sched.lock) |
| gp := globrunqget(_p_, 0) |
| unlock(&sched.lock) |
| if gp != nil { |
| return gp, false |
| } |
| } |
| |
| // Poll network. |
| // This netpoll is only an optimization before we resort to stealing. |
| // We can safely skip it if there are no waiters or a thread is blocked |
| // in netpoll already. If there is any kind of logical race with that |
| // blocked thread (e.g. it has already returned from netpoll, but does |
| // not set lastpoll yet), this thread will do blocking netpoll below |
| // anyway. |
| if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 { |
| if list := netpoll(0); !list.empty() { // non-blocking |
| gp := list.pop() |
| injectglist(&list) |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| if trace.enabled { |
| traceGoUnpark(gp, 0) |
| } |
| return gp, false |
| } |
| } |
| |
| // Steal work from other P's. |
| procs := uint32(gomaxprocs) |
| ranTimer := false |
| // If number of spinning M's >= number of busy P's, block. |
| // This is necessary to prevent excessive CPU consumption |
| // when GOMAXPROCS>>1 but the program parallelism is low. |
| if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { |
| goto stop |
| } |
| if !_g_.m.spinning { |
| _g_.m.spinning = true |
| atomic.Xadd(&sched.nmspinning, 1) |
| } |
| const stealTries = 4 |
| for i := 0; i < stealTries; i++ { |
| stealTimersOrRunNextG := i == stealTries-1 |
| |
| for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { |
| if sched.gcwaiting != 0 { |
| goto top |
| } |
| p2 := allp[enum.position()] |
| if _p_ == p2 { |
| continue |
| } |
| |
| // Steal timers from p2. This call to checkTimers is the only place |
| // where we might hold a lock on a different P's timers. We do this |
| // once on the last pass before checking runnext because stealing |
| // from the other P's runnext should be the last resort, so if there |
| // are timers to steal do that first. |
| // |
| // We only check timers on one of the stealing iterations because |
| // the time stored in now doesn't change in this loop and checking |
| // the timers for each P more than once with the same value of now |
| // is probably a waste of time. |
| // |
| // timerpMask tells us whether the P may have timers at all. If it |
| // can't, no need to check at all. |
| if stealTimersOrRunNextG && timerpMask.read(enum.position()) { |
| tnow, w, ran := checkTimers(p2, now) |
| now = tnow |
| if w != 0 && (pollUntil == 0 || w < pollUntil) { |
| pollUntil = w |
| } |
| if ran { |
| // Running the timers may have |
| // made an arbitrary number of G's |
| // ready and added them to this P's |
| // local run queue. That invalidates |
| // the assumption of runqsteal |
| // that it always has room to add |
| // stolen G's. So check now if there |
| // is a local G to run. |
| if gp, inheritTime := runqget(_p_); gp != nil { |
| return gp, inheritTime |
| } |
| ranTimer = true |
| } |
| } |
| |
| // Don't bother to attempt to steal if p2 is idle. |
| if !idlepMask.read(enum.position()) { |
| if gp := runqsteal(_p_, p2, stealTimersOrRunNextG); gp != nil { |
| return gp, false |
| } |
| } |
| } |
| } |
| if ranTimer { |
| // Running a timer may have made some goroutine ready. |
| goto top |
| } |
| |
| stop: |
| |
| // We have nothing to do. If we're in the GC mark phase, can |
| // safely scan and blacken objects, and have work to do, run |
| // idle-time marking rather than give up the P. |
| if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { |
| node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) |
| if node != nil { |
| _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode |
| gp := node.gp.ptr() |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| if trace.enabled { |
| traceGoUnpark(gp, 0) |
| } |
| return gp, false |
| } |
| } |
| |
| delta := int64(-1) |
| if pollUntil != 0 { |
| // checkTimers ensures that pollUntil > now. |
| delta = pollUntil - now |
| } |
| |
| // wasm only: |
| // If a callback returned and no other goroutine is awake, |
| // then wake the event handler goroutine, which pauses execution |
| // until a callback is triggered. |
| gp, otherReady := beforeIdle(delta) |
| if gp != nil { |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| if trace.enabled { |
| traceGoUnpark(gp, 0) |
| } |
| return gp, false |
| } |
| if otherReady { |
| goto top |
| } |
| |
| // Before we drop our P, make a snapshot of the allp slice, |
| // which can change underfoot once we no longer block |
| // safe-points. We don't need to snapshot the contents because |
| // everything up to cap(allp) is immutable. |
| allpSnapshot := allp |
| // Also snapshot masks. Value changes are OK, but we can't allow |
| // len to change out from under us. |
| idlepMaskSnapshot := idlepMask |
| timerpMaskSnapshot := timerpMask |
| |
| // return P and block |
| lock(&sched.lock) |
| if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { |
| unlock(&sched.lock) |
| goto top |
| } |
| if sched.runqsize != 0 { |
| gp := globrunqget(_p_, 0) |
| unlock(&sched.lock) |
| return gp, false |
| } |
| if releasep() != _p_ { |
| throw("findrunnable: wrong p") |
| } |
| pidleput(_p_) |
| unlock(&sched.lock) |
| |
| // Delicate dance: thread transitions from spinning to non-spinning state, |
| // potentially concurrently with submission of new goroutines. We must |
| // drop nmspinning first and then check all per-P queues again (with |
| // #StoreLoad memory barrier in between). If we do it the other way around, |
| // another thread can submit a goroutine after we've checked all run queues |
| // but before we drop nmspinning; as a result nobody will unpark a thread |
| // to run the goroutine. |
| // If we discover new work below, we need to restore m.spinning as a signal |
| // for resetspinning to unpark a new worker thread (because there can be more |
| // than one starving goroutine). However, if after discovering new work |
| // we also observe no idle Ps, it is OK to just park the current thread: |
| // the system is fully loaded so no spinning threads are required. |
| // Also see "Worker thread parking/unparking" comment at the top of the file. |
| wasSpinning := _g_.m.spinning |
| if _g_.m.spinning { |
| _g_.m.spinning = false |
| if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { |
| throw("findrunnable: negative nmspinning") |
| } |
| } |
| |
| // check all runqueues once again |
| for id, _p_ := range allpSnapshot { |
| if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) { |
| lock(&sched.lock) |
| _p_ = pidleget() |
| unlock(&sched.lock) |
| if _p_ != nil { |
| acquirep(_p_) |
| if wasSpinning { |
| _g_.m.spinning = true |
| atomic.Xadd(&sched.nmspinning, 1) |
| } |
| goto top |
| } |
| break |
| } |
| } |
| |
| // Similar to above, check for timer creation or expiry concurrently with |
| // transitioning from spinning to non-spinning. Note that we cannot use |
| // checkTimers here because it calls adjusttimers which may need to allocate |
| // memory, and that isn't allowed when we don't have an active P. |
| for id, _p_ := range allpSnapshot { |
| if timerpMaskSnapshot.read(uint32(id)) { |
| w := nobarrierWakeTime(_p_) |
| if w != 0 && (pollUntil == 0 || w < pollUntil) { |
| pollUntil = w |
| } |
| } |
| } |
| if pollUntil != 0 { |
| if now == 0 { |
| now = nanotime() |
| } |
| delta = pollUntil - now |
| if delta < 0 { |
| delta = 0 |
| } |
| } |
| |
| // Check for idle-priority GC work again. |
| // |
| // N.B. Since we have no P, gcBlackenEnabled may change at any time; we |
| // must check again after acquiring a P. |
| if atomic.Load(&gcBlackenEnabled) != 0 && gcMarkWorkAvailable(nil) { |
| // Work is available; we can start an idle GC worker only if |
| // there is an available P and available worker G. |
| // |
| // We can attempt to acquire these in either order. Workers are |
| // almost always available (see comment in findRunnableGCWorker |
| // for the one case there may be none). Since we're slightly |
| // less likely to find a P, check for that first. |
| lock(&sched.lock) |
| var node *gcBgMarkWorkerNode |
| _p_ = pidleget() |
| if _p_ != nil { |
| // Now that we own a P, gcBlackenEnabled can't change |
| // (as it requires STW). |
| if gcBlackenEnabled != 0 { |
| node = (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) |
| if node == nil { |
| pidleput(_p_) |
| _p_ = nil |
| } |
| } else { |
| pidleput(_p_) |
| _p_ = nil |
| } |
| } |
| unlock(&sched.lock) |
| if _p_ != nil { |
| acquirep(_p_) |
| if wasSpinning { |
| _g_.m.spinning = true |
| atomic.Xadd(&sched.nmspinning, 1) |
| } |
| |
| // Run the idle worker. |
| _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode |
| gp := node.gp.ptr() |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| if trace.enabled { |
| traceGoUnpark(gp, 0) |
| } |
| return gp, false |
| } |
| } |
| |
| // poll network |
| if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 { |
| atomic.Store64(&sched.pollUntil, uint64(pollUntil)) |
| if _g_.m.p != 0 { |
| throw("findrunnable: netpoll with p") |
| } |
| if _g_.m.spinning { |
| throw("findrunnable: netpoll with spinning") |
| } |
| if faketime != 0 { |
| // When using fake time, just poll. |
| delta = 0 |
| } |
| list := netpoll(delta) // block until new work is available |
| atomic.Store64(&sched.pollUntil, 0) |
| atomic.Store64(&sched.lastpoll, uint64(nanotime())) |
| if faketime != 0 && list.empty() { |
| // Using fake time and nothing is ready; stop M. |
| // When all M's stop, checkdead will call timejump. |
| stopm() |
| goto top |
| } |
| lock(&sched.lock) |
| _p_ = pidleget() |
| unlock(&sched.lock) |
| if _p_ == nil { |
| injectglist(&list) |
| } else { |
| acquirep(_p_) |
| if !list.empty() { |
| gp := list.pop() |
| injectglist(&list) |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| if trace.enabled { |
| traceGoUnpark(gp, 0) |
| } |
| return gp, false |
| } |
| if wasSpinning { |
| _g_.m.spinning = true |
| atomic.Xadd(&sched.nmspinning, 1) |
| } |
| goto top |
| } |
| } else if pollUntil != 0 && netpollinited() { |
| pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) |
| if pollerPollUntil == 0 || pollerPollUntil > pollUntil { |
| netpollBreak() |
| } |
| } |
| stopm() |
| goto top |
| } |
| |
| // pollWork reports whether there is non-background work this P could |
| // be doing. This is a fairly lightweight check to be used for |
| // background work loops, like idle GC. It checks a subset of the |
| // conditions checked by the actual scheduler. |
| func pollWork() bool { |
| if sched.runqsize != 0 { |
| return true |
| } |
| p := getg().m.p.ptr() |
| if !runqempty(p) { |
| return true |
| } |
| if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { |
| if list := netpoll(0); !list.empty() { |
| injectglist(&list) |
| return true |
| } |
| } |
| return false |
| } |
| |
| // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't |
| // going to wake up before the when argument; or it wakes an idle P to service |
| // timers and the network poller if there isn't one already. |
| func wakeNetPoller(when int64) { |
| if atomic.Load64(&sched.lastpoll) == 0 { |
| // In findrunnable we ensure that when polling the pollUntil |
| // field is either zero or the time to which the current |
| // poll is expected to run. This can have a spurious wakeup |
| // but should never miss a wakeup. |
| pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) |
| if pollerPollUntil == 0 || pollerPollUntil > when { |
| netpollBreak() |
| } |
| } else { |
| // There are no threads in the network poller, try to get |
| // one there so it can handle new timers. |
| if GOOS != "plan9" { // Temporary workaround - see issue #42303. |
| wakep() |
| } |
| } |
| } |
| |
| func resetspinning() { |
| _g_ := getg() |
| if !_g_.m.spinning { |
| throw("resetspinning: not a spinning m") |
| } |
| _g_.m.spinning = false |
| nmspinning := atomic.Xadd(&sched.nmspinning, -1) |
| if int32(nmspinning) < 0 { |
| throw("findrunnable: negative nmspinning") |
| } |
| // M wakeup policy is deliberately somewhat conservative, so check if we |
| // need to wakeup another P here. See "Worker thread parking/unparking" |
| // comment at the top of the file for details. |
| wakep() |
| } |
| |
| // injectglist adds each runnable G on the list to some run queue, |
| // and clears glist. If there is no current P, they are added to the |
| // global queue, and up to npidle M's are started to run them. |
| // Otherwise, for each idle P, this adds a G to the global queue |
| // and starts an M. Any remaining G's are added to the current P's |
| // local run queue. |
| // This may temporarily acquire sched.lock. |
| // Can run concurrently with GC. |
| func injectglist(glist *gList) { |
| if glist.empty() { |
| return |
| } |
| if trace.enabled { |
| for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { |
| traceGoUnpark(gp, 0) |
| } |
| } |
| |
| // Mark all the goroutines as runnable before we put them |
| // on the run queues. |
| head := glist.head.ptr() |
| var tail *g |
| qsize := 0 |
| for gp := head; gp != nil; gp = gp.schedlink.ptr() { |
| tail = gp |
| qsize++ |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| } |
| |
| // Turn the gList into a gQueue. |
| var q gQueue |
| q.head.set(head) |
| q.tail.set(tail) |
| *glist = gList{} |
| |
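| // startIdle starts up to n M's, bounded by the number of idle P's, |
| // to run the goroutines we are about to queue. |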
| startIdle := func(n int) { |
| for ; n != 0 && sched.npidle != 0; n-- { |
| startm(nil, false) |
| } |
| } |
| |
| pp := getg().m.p.ptr() |
| if pp == nil { |
| lock(&sched.lock) |
| globrunqputbatch(&q, int32(qsize)) |
| unlock(&sched.lock) |
| startIdle(qsize) |
| return |
| } |
| |
| npidle := int(atomic.Load(&sched.npidle)) |
| var globq gQueue |
| var n int |
| for n = 0; n < npidle && !q.empty(); n++ { |
| g := q.pop() |
| globq.pushBack(g) |
| } |
| if n > 0 { |
| lock(&sched.lock) |
| globrunqputbatch(&globq, int32(n)) |
| unlock(&sched.lock) |
| startIdle(n) |
| qsize -= n |
| } |
| |
| if !q.empty() { |
| runqputbatch(pp, &q, qsize) |
| } |
| } |
| |
| // One round of scheduler: find a runnable goroutine and execute it. |
| // Never returns. |
| func schedule() { |
| _g_ := getg() |
| |
| if _g_.m.locks != 0 { |
| throw("schedule: holding locks") |
| } |
| |
| if _g_.m.lockedg != 0 { |
| stoplockedm() |
| execute(_g_.m.lockedg.ptr(), false) // Never returns. |
| } |
| |
| // We should not schedule away from a g that is executing a cgo call, |
| // since the cgo call is using the m's g0 stack. |
| if _g_.m.incgo { |
| throw("schedule: in cgo") |
| } |
| |
| top: |
| pp := _g_.m.p.ptr() |
| pp.preempt = false |
| |
| if sched.gcwaiting != 0 { |
| gcstopm() |
| goto top |
| } |
| if pp.runSafePointFn != 0 { |
| runSafePointFn() |
| } |
| |
| // Sanity check: if we are spinning, the run queue should be empty. |
| // Check this before calling checkTimers, as that might call |
| // goready to put a ready goroutine on the local run queue. |
| if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { |
| throw("schedule: spinning with local work") |
| } |
| |
| checkTimers(pp, 0) |
| |
| var gp *g |
| var inheritTime bool |
| |
| // Normal goroutines will check for the need to wakeP in ready, |
| // but GC workers and trace readers will not, so the check must |
| // be done here instead. |
| tryWakeP := false |
| if trace.enabled || trace.shutdown { |
| gp = traceReader() |
| if gp != nil { |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| traceGoUnpark(gp, 0) |
| tryWakeP = true |
| } |
| } |
| if gp == nil && gcBlackenEnabled != 0 { |
| gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) |
| tryWakeP = tryWakeP || gp != nil |
| } |
| if gp == nil { |
| // Check the global runnable queue once in a while to ensure fairness. |
| // Otherwise two goroutines can completely occupy the local runqueue |
| // by constantly respawning each other. |
| if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { |
| lock(&sched.lock) |
| gp = globrunqget(_g_.m.p.ptr(), 1) |
| unlock(&sched.lock) |
| } |
| } |
| if gp == nil { |
| gp, inheritTime = runqget(_g_.m.p.ptr()) |
| // We can see gp != nil here even if the M is spinning, |
| // if checkTimers added a local goroutine via goready. |
| |
| // Because gccgo does not implement preemption as a stack check, |
| // we need to check for preemption here for fairness. |
| // Otherwise goroutines on the local queue may starve |
| // goroutines on the global queue. |
| // Since we preempt by storing the goroutine on the global |
| // queue, this is the only place we need to check preempt. |
| // This does not call checkPreempt because gp is not running. |
| if gp != nil && gp.preempt { |
| gp.preempt = false |
| lock(&sched.lock) |
| globrunqput(gp) |
| unlock(&sched.lock) |
| goto top |
| } |
| } |
| if gp == nil { |
| gp, inheritTime = findrunnable() // blocks until work is available |
| } |
| |
| // This thread is going to run a goroutine and is not spinning anymore, |
| // so if it was marked as spinning we need to reset it now and potentially |
| // start a new spinning M. |
| if _g_.m.spinning { |
| resetspinning() |
| } |
| |
| if sched.disable.user && !schedEnabled(gp) { |
| // Scheduling of this goroutine is disabled. Put it on |
| // the list of pending runnable goroutines for when we |
| // re-enable user scheduling and look again. |
| lock(&sched.lock) |
| if schedEnabled(gp) { |
| // Something re-enabled scheduling while we |
| // were acquiring the lock. |
| unlock(&sched.lock) |
| } else { |
| sched.disable.runnable.pushBack(gp) |
| sched.disable.n++ |
| unlock(&sched.lock) |
| goto top |
| } |
| } |
| |
| // If about to schedule a not-normal goroutine (a GCworker or tracereader), |
| // wake a P if there is one. |
| if tryWakeP { |
| wakep() |
| } |
| if gp.lockedm != 0 { |
| // Hands off own p to the locked m, |
| // then blocks waiting for a new p. |
| startlockedm(gp) |
| goto top |
| } |
| |
| execute(gp, inheritTime) |
| } |
| |
| // dropg removes the association between m and the current goroutine m->curg (gp for short). |
| // Typically a caller sets gp's status away from Grunning and then |
| // immediately calls dropg to finish the job. The caller is also responsible |
| // for arranging that gp will be restarted using ready at an |
| // appropriate time. After calling dropg and arranging for gp to be |
| // readied later, the caller can do other work but eventually should |
| // call schedule to restart the scheduling of goroutines on this m. |
| func dropg() { |
| _g_ := getg() |
| |
| setMNoWB(&_g_.m.curg.m, nil) |
| setGNoWB(&_g_.m.curg, nil) |
| } |
| |
| // checkTimers runs any timers for the P that are ready. |
| // If now is not 0 it is the current time. |
| // It returns the current time or 0 if it is not known, |
| // and the time when the next timer should run or 0 if there is no next timer, |
| // and reports whether it ran any timers. |
| // If the time when the next timer should run is not 0, |
| // it is always larger than the returned time. |
| // We pass now in and out to avoid extra calls of nanotime. |
| //go:yeswritebarrierrec |
| func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { |
| // If it's not yet time for the first timer, or the first adjusted |
| // timer, then there is nothing to do. |
| next := int64(atomic.Load64(&pp.timer0When)) |
| nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest)) |
| if next == 0 || (nextAdj != 0 && nextAdj < next) { |
| next = nextAdj |
| } |
| |
| if next == 0 { |
| // No timers to run or adjust. |
| return now, 0, false |
| } |
| |
| if now == 0 { |
| now = nanotime() |
| } |
| if now < next { |
| // Next timer is not ready to run, but keep going |
| // if we would clear deleted timers. |
| // This corresponds to the condition below where |
| // we decide whether to call clearDeletedTimers. |
| if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) { |
| return now, next, false |
| } |
| } |
| |
| lock(&pp.timersLock) |
| |
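| // adjusttimers moves timers that were modified to an earlier time |
| // into their proper place in the heap; runtimer then runs ready |
| // timers one at a time, returning nonzero once the next timer is |
| // not yet ready or there are no timers left. |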
| if len(pp.timers) > 0 { |
| adjusttimers(pp, now) |
| for len(pp.timers) > 0 { |
| // Note that runtimer may temporarily unlock |
| // pp.timersLock. |
| if tw := runtimer(pp, now); tw != 0 { |
| if tw > 0 { |
| pollUntil = tw |
| } |
| break |
| } |
| ran = true |
| } |
| } |
| |
| // If this is the local P, and there are a lot of deleted timers, |
| // clear them out. We only do this for the local P to reduce |
| // lock contention on timersLock. |
| if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 { |
| clearDeletedTimers(pp) |
| } |
| |
| unlock(&pp.timersLock) |
| |
| return now, pollUntil, ran |
| } |
| |
| func parkunlock_c(gp *g, lock unsafe.Pointer) bool { |
| unlock((*mutex)(lock)) |
| return true |
| } |
| |
| // park continuation on g0. |
| func park_m(gp *g) { |
| _g_ := getg() |
| |
| if trace.enabled { |
| traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) |
| } |
| |
| casgstatus(gp, _Grunning, _Gwaiting) |
| dropg() |
| |
| if fn := _g_.m.waitunlockf; fn != nil { |
| ok := fn(gp, _g_.m.waitlock) |
| _g_.m.waitunlockf = nil |
| _g_.m.waitlock = nil |
| if !ok { |
| if trace.enabled { |
| traceGoUnpark(gp, 2) |
| } |
| casgstatus(gp, _Gwaiting, _Grunnable) |
| execute(gp, true) // Schedule it back, never returns. |
| } |
| } |
| schedule() |
| } |
| |
| func goschedImpl(gp *g) { |
| status := readgstatus(gp) |
| if status&^_Gscan != _Grunning { |
| dumpgstatus(gp) |
| throw("bad g status") |
| } |
| casgstatus(gp, _Grunning, _Grunnable) |
| dropg() |
| lock(&sched.lock) |
| globrunqput(gp) |
| unlock(&sched.lock) |
| |
| schedule() |
| } |
| |
| // Gosched continuation on g0. |
| func gosched_m(gp *g) { |
| if trace.enabled { |
| traceGoSched() |
| } |
| goschedImpl(gp) |
| } |
| |
| // goschedguarded is a forbidden-states-avoided version of gosched_m |
| func goschedguarded_m(gp *g) { |
| |
| if !canPreemptM(gp.m) { |
| gogo(gp) // never return |
| } |
| |
| if trace.enabled { |
| traceGoSched() |
| } |
| goschedImpl(gp) |
| } |
| |
| func gopreempt_m(gp *g) { |
| if trace.enabled { |
| traceGoPreempt() |
| } |
| goschedImpl(gp) |
| } |
| |
| // preemptPark parks gp and puts it in _Gpreempted. |
| // |
| //go:systemstack |
| func preemptPark(gp *g) { |
| if trace.enabled { |
| traceGoPark(traceEvGoBlock, 0) |
| } |
| status := readgstatus(gp) |
| if status&^_Gscan != _Grunning { |
| dumpgstatus(gp) |
| throw("bad g status") |
| } |
| gp.waitreason = waitReasonPreempted |
| // Transition from _Grunning to _Gscan|_Gpreempted. We can't |
| // be in _Grunning when we dropg because then we'd be running |
| // without an M, but the moment we're in _Gpreempted, |
| // something could claim this G before we've fully cleaned it |
| // up. Hence, we set the scan bit to lock down further |
| // transitions until we can dropg. |
| casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted) |
| dropg() |
| casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted) |
| schedule() |
| } |
| |
| // goyield is like Gosched, but it: |
| // - emits a GoPreempt trace event instead of a GoSched trace event |
| // - puts the current G on the runq of the current P instead of the globrunq |
| func goyield() { |
| checkTimeouts() |
| mcall(goyield_m) |
| } |
| |
| func goyield_m(gp *g) { |
| if trace.enabled { |
| traceGoPreempt() |
| } |
| pp := gp.m.p.ptr() |
| casgstatus(gp, _Grunning, _Grunnable) |
| dropg() |
| runqput(pp, gp, false) |
| schedule() |
| } |
| |
| // Finishes execution of the current goroutine. |
| func goexit1() { |
| if trace.enabled { |
| traceGoEnd() |
| } |
| mcall(goexit0) |
| } |
| |
| // goexit continuation on g0. |
| func goexit0(gp *g) { |
| _g_ := getg() |
| |
| casgstatus(gp, _Grunning, _Gdead) |
| if isSystemGoroutine(gp, false) { |
| atomic.Xadd(&sched.ngsys, -1) |
| gp.isSystemGoroutine = false |
| } |
| gp.m = nil |
| locked := gp.lockedm != 0 |
| gp.lockedm = 0 |
| _g_.m.lockedg = 0 |
| gp.entry = nil |
| gp.preemptStop = false |
| gp.paniconfault = false |
| gp._defer = nil // should be nil already but just in case. |
| gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. |
| gp.writebuf = nil |
| gp.waitreason = 0 |
| gp.param = nil |
| gp.labels = nil |
| gp.timer = nil |
| |
| if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { |
| // Flush assist credit to the global pool. This gives |
| // better information to pacing if the application is |
| // rapidly creating and exiting goroutines. |
| assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte)) |
| scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes)) |
| atomic.Xaddint64(&gcController.bgScanCredit, scanCredit) |
| gp.gcAssistBytes = 0 |
| } |
| |
| dropg() |
| |
| if GOARCH == "wasm" { // no threads yet on wasm |
| gfput(_g_.m.p.ptr(), gp) |
| schedule() // never returns |
| } |
| |
| if _g_.m.lockedInt != 0 { |
| print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n") |
| throw("internal lockOSThread error") |
| } |
| gfput(_g_.m.p.ptr(), gp) |
| if locked { |
| // The goroutine may have locked this thread because |
| // it put it in an unusual kernel state. Kill it |
| // rather than returning it to the thread pool. |
| |
| // Return to mstart, which will release the P and exit |
| // the thread. |
| if GOOS != "plan9" { // See golang.org/issue/22227. |
| _g_.m.exiting = true |
| gogo(_g_.m.g0) |
| } else { |
| // Clear lockedExt on plan9 since we may end up re-using |
| // this thread. |
| _g_.m.lockedExt = 0 |
| } |
| } |
| schedule() |
| } |
| |
| // The goroutine g is about to enter a system call. |
| // Record that it's not using the cpu anymore. |
| // This is called only from the go syscall library and cgocall, |
| // not from the low-level system calls used by the runtime. |
| // |
| // The entersyscall function is written in C so that it can save the |
| // current register context, allowing the GC to see those registers. |
| // It calls reentersyscall. |
| // |
| // Syscall tracing: |
| // At the start of a syscall we emit traceGoSysCall to capture the stack trace. |
| // If the syscall does not block, that is it, we do not emit any other events. |
| // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; |
| // when syscall returns we emit traceGoSysExit and when the goroutine starts running |
| // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. |
| // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, |
| // we remember the current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick); |
| // whoever emits traceGoSysBlock increments p.syscalltick afterwards, |
| // and we wait for the increment before emitting traceGoSysExit. |
| // Note that the increment is done even if tracing is not enabled, |
| // because tracing can be enabled in the middle of a syscall. We don't want the wait to hang. |
| // |
| //go:nosplit |
| //go:noinline |
| func reentersyscall(pc, sp uintptr) { |
| _g_ := getg() |
| |
| // Disable preemption because during this function g is in Gsyscall status, |
| // but can have inconsistent g->sched, do not let GC observe it. |
| _g_.m.locks++ |
| |
| _g_.syscallsp = sp |
| _g_.syscallpc = pc |
| casgstatus(_g_, _Grunning, _Gsyscall) |
| |
| if trace.enabled { |
| systemstack(traceGoSysCall) |
| } |
| |
| if atomic.Load(&sched.sysmonwait) != 0 { |
| systemstack(entersyscall_sysmon) |
| } |
| |
| if _g_.m.p.ptr().runSafePointFn != 0 { |
| // runSafePointFn may stack split if run on this stack |
| systemstack(runSafePointFn) |
| } |
| |
| _g_.m.syscalltick = _g_.m.p.ptr().syscalltick |
| _g_.sysblocktraced = true |
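| // Detach the P from this M but remember it in oldp so that |
| // exitsyscall can try to reacquire it quickly. While the P is in |
| // _Psyscall, sysmon may retake it if the syscall runs long. |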
| pp := _g_.m.p.ptr() |
| pp.m = 0 |
| _g_.m.oldp.set(pp) |
| _g_.m.p = 0 |
| atomic.Store(&pp.status, _Psyscall) |
| if sched.gcwaiting != 0 { |
| systemstack(entersyscall_gcwait) |
| } |
| |
| _g_.m.locks-- |
| } |
| |
| func entersyscall_sysmon() { |
| lock(&sched.lock) |
| if atomic.Load(&sched.sysmonwait) != 0 { |
| atomic.Store(&sched.sysmonwait, 0) |
| notewakeup(&sched.sysmonnote) |
| } |
| unlock(&sched.lock) |
| } |
| |
| func entersyscall_gcwait() { |
| _g_ := getg() |
| _p_ := _g_.m.oldp.ptr() |
| |
| lock(&sched.lock) |
| if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { |
| if trace.enabled { |
| traceGoSysBlock(_p_) |
| traceProcStop(_p_) |
| } |
| _p_.syscalltick++ |
| if sched.stopwait--; sched.stopwait == 0 { |
| notewakeup(&sched.stopnote) |
| } |
| } |
| unlock(&sched.lock) |
| } |
| |
| func reentersyscallblock(pc, sp uintptr) { |
| _g_ := getg() |
| |
| _g_.m.locks++ // see comment in entersyscall |
| _g_.throwsplit = true |
| _g_.m.syscalltick = _g_.m.p.ptr().syscalltick |
| _g_.sysblocktraced = true |
| _g_.m.p.ptr().syscalltick++ |
| |
| // Leave SP around for GC and traceback. |
| _g_.syscallsp = sp |
| _g_.syscallpc = pc |
| casgstatus(_g_, _Grunning, _Gsyscall) |
| systemstack(entersyscallblock_handoff) |
| |
| _g_.m.locks-- |
| } |
| |
| func entersyscallblock_handoff() { |
| if trace.enabled { |
| traceGoSysCall() |
| traceGoSysBlock(getg().m.p.ptr()) |
| } |
| handoffp(releasep()) |
| } |
| |
| // The goroutine g exited its system call. |
| // Arrange for it to run on a cpu again. |
| // This is called only from the go syscall library, not |
| // from the low-level system calls used by the runtime. |
| // |
| // Write barriers are not allowed because our P may have been stolen. |
| // |
| //go:nosplit |
| //go:nowritebarrierrec |
| func exitsyscall() { |
| _g_ := getg() |
| |
| _g_.m.locks++ // see comment in entersyscall |
| |
| _g_.waitsince = 0 |
| oldp := _g_.m.oldp.ptr() |
| _g_.m.oldp = 0 |
| if exitsyscallfast(oldp) { |
| if trace.enabled { |
| if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { |
| systemstack(traceGoStart) |
| } |
| } |
| // There's a cpu for us, so we can run. |
| _g_.m.p.ptr().syscalltick++ |
| // We need to cas the status and scan before resuming... |
| casgstatus(_g_, _Gsyscall, _Grunning) |
| |
| exitsyscallclear(_g_) |
| _g_.m.locks-- |
| _g_.throwsplit = false |
| |
| // Check preemption, since unlike gc we don't check on |
| // every call. |
| if getg().preempt { |
| checkPreempt() |
| } |
| _g_.throwsplit = false |
| |
| if sched.disable.user && !schedEnabled(_g_) { |
| // Scheduling of this goroutine is disabled. |
| Gosched() |
| } |
| |
| return |
| } |
| |
| _g_.sysexitticks = 0 |
| if trace.enabled { |
| // Wait till traceGoSysBlock event is emitted. |
| // This ensures consistency of the trace (the goroutine is started after it is blocked). |
| for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { |
| osyield() |
| } |
| // We can't trace syscall exit right now because we don't have a P. |
| // Tracing code can invoke write barriers that cannot run without a P. |
| // So instead we remember the syscall exit time and emit the event |
| // in execute when we have a P. |
| _g_.sysexitticks = cputicks() |
| } |
| |
| _g_.m.locks-- |
| |
| // Call the scheduler. |
| mcall(exitsyscall0) |
| |
| // Scheduler returned, so we're allowed to run now. |
| // Delete the syscallsp information that we left for |
| // the garbage collector during the system call. |
| // Must wait until now because until gosched returns |
| // we don't know for sure that the garbage collector |
| // is not running. |
| exitsyscallclear(_g_) |
| |
| _g_.m.p.ptr().syscalltick++ |
| _g_.throwsplit = false |
| } |
| |
| //go:nosplit |
| func exitsyscallfast(oldp *p) bool { |
| _g_ := getg() |
| |
| // Freezetheworld sets stopwait but does not retake P's. |
| if sched.stopwait == freezeStopWait { |
| return false |
| } |
| |
| // Try to re-acquire the last P. |
| if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { |
| // There's a cpu for us, so we can run. |
| wirep(oldp) |
| exitsyscallfast_reacquired() |
| return true |
| } |
| |
| // Try to get any other idle P. |
| if sched.pidle != 0 { |
| var ok bool |
| systemstack(func() { |
| ok = exitsyscallfast_pidle() |
| if ok && trace.enabled { |
| if oldp != nil { |
| // Wait till traceGoSysBlock event is emitted. |
| // This ensures consistency of the trace (the goroutine is started after it is blocked). |
| for oldp.syscalltick == _g_.m.syscalltick { |
| osyield() |
| } |
| } |
| traceGoSysExit(0) |
| } |
| }) |
| if ok { |
| return true |
| } |
| } |
| return false |
| } |
| |
| // exitsyscallfast_reacquired is the exitsyscall path on which this G |
| // has successfully reacquired the P it was running on before the |
| // syscall. |
| // |
| //go:nosplit |
| func exitsyscallfast_reacquired() { |
| _g_ := getg() |
| if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { |
| if trace.enabled { |
| // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed). |
| // traceGoSysBlock for this syscall was already emitted, |
| // but here we effectively retake the p from the new syscall running on the same p. |
| systemstack(func() { |
| // Denote blocking of the new syscall. |
| traceGoSysBlock(_g_.m.p.ptr()) |
| // Denote completion of the current syscall. |
| traceGoSysExit(0) |
| }) |
| } |
| _g_.m.p.ptr().syscalltick++ |
| } |
| } |
| |
| func exitsyscallfast_pidle() bool { |
| lock(&sched.lock) |
| _p_ := pidleget() |
| if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { |
| atomic.Store(&sched.sysmonwait, 0) |
| notewakeup(&sched.sysmonnote) |
| } |
| unlock(&sched.lock) |
| if _p_ != nil { |
| acquirep(_p_) |
| return true |
| } |
| return false |
| } |
| |
| // exitsyscall slow path on g0. |
| // Failed to acquire P, enqueue gp as runnable. |
| // |
| //go:nowritebarrierrec |
| func exitsyscall0(gp *g) { |
| _g_ := getg() |
| |
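| // gp passes through the transient _Gexitingsyscall state while we |
| // clear its association with this M, then becomes runnable. |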
| casgstatus(gp, _Gsyscall, _Gexitingsyscall) |
| dropg() |
| casgstatus(gp, _Gexitingsyscall, _Grunnable) |
| lock(&sched.lock) |
| var _p_ *p |
| if schedEnabled(_g_) { |
| _p_ = pidleget() |
| } |
| if _p_ == nil { |
| globrunqput(gp) |
| } else if atomic.Load(&sched.sysmonwait) != 0 { |
| atomic.Store(&sched.sysmonwait, 0) |
| notewakeup(&sched.sysmonnote) |
| } |
| unlock(&sched.lock) |
| if _p_ != nil { |
| acquirep(_p_) |
| execute(gp, false) // Never returns. |
| } |
| if _g_.m.lockedg != 0 { |
| // Wait until another thread schedules gp and so m again. |
| stoplockedm() |
| execute(gp, false) // Never returns. |
| } |
| stopm() |
| schedule() // Never returns. |
| } |
| |
| // exitsyscallclear clears GC-related information that we only track |
| // during a syscall. |
| func exitsyscallclear(gp *g) { |
| // Garbage collector isn't running (since we are), so okay to |
| // clear syscallsp. |
| gp.syscallsp = 0 |
| |
| gp.gcstack = 0 |
| gp.gcnextsp = 0 |
| memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs)) |
| } |
| |
| // Code generated by cgo, and some library code, calls syscall.Entersyscall |
| // and syscall.Exitsyscall. |
| |
| //go:linkname syscall_entersyscall syscall.Entersyscall |
| //go:nosplit |
| func syscall_entersyscall() { |
| entersyscall() |
| } |
| |
| //go:linkname syscall_exitsyscall syscall.Exitsyscall |
| //go:nosplit |
| func syscall_exitsyscall() { |
| exitsyscall() |
| } |
| |
| func beforefork() { |
| gp := getg().m.curg |
| |
| // Block signals during a fork, so that the child does not run |
| // a signal handler before exec if a signal is sent to the process |
| // group. See issue #18600. |
| gp.m.locks++ |
| sigsave(&gp.m.sigmask) |
| sigblock(false) |
| } |
| |
| // Called from syscall package before fork. |
| //go:linkname syscall_runtime_BeforeFork syscall.runtime__BeforeFork |
| //go:nosplit |
| func syscall_runtime_BeforeFork() { |
| systemstack(beforefork) |
| } |
| |
| func afterfork() { |
| gp := getg().m.curg |
| |
| msigrestore(gp.m.sigmask) |
| |
| gp.m.locks-- |
| } |
| |
| // Called from syscall package after fork in parent. |
| //go:linkname syscall_runtime_AfterFork syscall.runtime__AfterFork |
| //go:nosplit |
| func syscall_runtime_AfterFork() { |
| systemstack(afterfork) |
| } |
| |
| // inForkedChild is true while manipulating signals in the child process. |
| // This is used to avoid calling libc functions in case we are using vfork. |
| var inForkedChild bool |
| |
| // Called from syscall package after fork in child. |
| // It resets non-sigignored signals to the default handler, and |
| // restores the signal mask in preparation for the exec. |
| // |
| // Because this might be called during a vfork, and therefore may be |
| // temporarily sharing address space with the parent process, this must |
| // not change any global variables or call into C code that may do so. |
| // |
| //go:linkname syscall_runtime_AfterForkInChild syscall.runtime__AfterForkInChild |
| //go:nosplit |
| //go:nowritebarrierrec |
| func syscall_runtime_AfterForkInChild() { |
| // It's OK to change the global variable inForkedChild here |
| // because we are going to change it back. There is no race here, |
| // because if we are sharing address space with the parent process, |
| // then the parent process can not be running concurrently. |
| inForkedChild = true |
| |
| clearSignalHandlers() |
| |
| // When we are the child we are the only thread running, |
| // so we know that nothing else has changed gp.m.sigmask. |
| msigrestore(getg().m.sigmask) |
| |
| inForkedChild = false |
| } |
| |
| // pendingPreemptSignals is the number of preemption signals |
| // that have been sent but not received. This is only used on Darwin. |
| // For #41702. |
| var pendingPreemptSignals uint32 |
| |
| // Called from syscall package before Exec. |
| //go:linkname syscall_runtime_BeforeExec syscall.runtime__BeforeExec |
| func syscall_runtime_BeforeExec() { |
| // Prevent thread creation during exec. |
| execLock.lock() |
| |
| // On Darwin, wait for all pending preemption signals to |
| // be received. See issue #41702. |
| if GOOS == "darwin" || GOOS == "ios" { |
| for int32(atomic.Load(&pendingPreemptSignals)) > 0 { |
| osyield() |
| } |
| } |
| } |
| |
| // Called from syscall package after Exec. |
| //go:linkname syscall_runtime_AfterExec syscall.runtime__AfterExec |
| func syscall_runtime_AfterExec() { |
| execLock.unlock() |
| } |
| |
| // panicgonil is used for gccgo as we need to use a compiler check for |
| // a nil func, in case we have to build a thunk. |
| //go:linkname panicgonil |
| func panicgonil() { |
| getg().m.throwing = -1 // do not dump full stacks |
| throw("go of nil func value") |
| } |
| |
| // Create a new g running fn passing arg as the single argument. |
| // Put it on the queue of g's waiting to run. |
| // The compiler turns a go statement into a call to this. |
| //go:linkname newproc __go_go |
| func newproc(fn uintptr, arg unsafe.Pointer) *g { |
| _g_ := getg() |
| |
| if fn == 0 { |
| _g_.m.throwing = -1 // do not dump full stacks |
| throw("go of nil func value") |
| } |
| acquirem() // disable preemption because it can be holding p in a local var |
| |
| _p_ := _g_.m.p.ptr() |
| newg := gfget(_p_) |
| var ( |
| sp unsafe.Pointer |
| spsize uintptr |
| ) |
| if newg == nil { |
| newg = malg(true, false, &sp, &spsize) |
| casgstatus(newg, _Gidle, _Gdead) |
| allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. |
| } else { |
| resetNewG(newg, &sp, &spsize) |
| } |
| newg.traceback = 0 |
| |
| if readgstatus(newg) != _Gdead { |
| throw("newproc1: new g is not Gdead") |
| } |
| |
| // Store the C function pointer into entryfn, take the address |
| // of entryfn, convert it to a Go function value, and store |
| // that in entry. |
| newg.entryfn = fn |
| var entry func(unsafe.Pointer) |
| *(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn) |
| newg.entry = entry |
| |
| newg.param = arg |
| newg.gopc = getcallerpc() |
| newg.ancestors = saveAncestors(_g_) |
| newg.startpc = fn |
| if _g_.m.curg != nil { |
| newg.labels = _g_.m.curg.labels |
| } |
| if isSystemGoroutine(newg, false) { |
| atomic.Xadd(&sched.ngsys, +1) |
| } |
| casgstatus(newg, _Gdead, _Grunnable) |
| |
| if _p_.goidcache == _p_.goidcacheend { |
| // Sched.goidgen is the last allocated id;
| // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
| // At startup sched.goidgen=0, so the main goroutine receives goid=1.
| _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) |
| _p_.goidcache -= _GoidCacheBatch - 1 |
| _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch |
| } |
| newg.goid = int64(_p_.goidcache) |
| _p_.goidcache++ |
| if trace.enabled { |
| traceGoCreate(newg, newg.startpc) |
| } |
| |
| makeGContext(newg, sp, spsize) |
| |
| releasem(_g_.m) |
| |
| runqput(_p_, newg, true) |
| |
| if mainStarted { |
| wakep() |
| } |
| |
| return newg |
| } |
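| 
| // For reference, a sketch of the lowering the compiler performs for a go
| // statement (illustrative pseudo-code; the exact thunk construction is done
| // by the gccgo frontend, and funcPCFor is a stand-in name):
| //
| //	go f(x)
| //
| // becomes roughly
| //
| //	args := &struct{ x T }{x}                        // heap-allocated argument block
| //	newproc(funcPCFor(thunk), unsafe.Pointer(args))
| //
| // where thunk is a compiler-built function that unpacks args and calls f(x).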
| |
| // expectedSystemGoroutines counts the number of goroutines expected |
| // to mark themselves as system goroutines. After they mark themselves |
| // by calling setSystemGoroutine, this is decremented. NumGoroutine
| // uses this to wait for all system goroutines to mark themselves |
| // before it counts them. |
| var expectedSystemGoroutines uint32 |
| |
| // expectSystemGoroutine is called when starting a goroutine that will |
| // call setSystemGoroutine. It increments expectedSystemGoroutines. |
| func expectSystemGoroutine() { |
| atomic.Xadd(&expectedSystemGoroutines, +1) |
| } |
| |
| // waitForSystemGoroutines waits for all currently expected system |
| // goroutines to register themselves. |
| func waitForSystemGoroutines() { |
| for atomic.Load(&expectedSystemGoroutines) > 0 { |
| Gosched() |
| osyield() |
| } |
| } |
| |
| // setSystemGoroutine marks this goroutine as a "system goroutine". |
| // In the gc toolchain this is done by comparing startpc to a list of |
| // saved special PCs. In gccgo that approach does not work as startpc |
| // is often a thunk that invokes the real function with arguments, |
| // so the thunk address never matches the saved special PCs. Instead, |
| // since there are only a limited number of "system goroutines", |
| // we force each one to mark itself as special. |
| func setSystemGoroutine() { |
| getg().isSystemGoroutine = true |
| atomic.Xadd(&sched.ngsys, +1) |
| atomic.Xadd(&expectedSystemGoroutines, -1) |
| } |
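| 
| // A sketch of the pairing protocol used when the runtime starts one of its
| // own goroutines (illustrative; the creator and the new goroutine cooperate):
| //
| //	expectSystemGoroutine()  // creator: announce that a system goroutine is coming
| //	go func() {
| //		setSystemGoroutine() // new goroutine: mark itself, decrement the expected count
| //		// ... long-running runtime work ...
| //	}()
| //
| // NumGoroutine uses waitForSystemGoroutines so that goroutines created but
| // not yet marked are not miscounted as user goroutines.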
| |
| // saveAncestors copies previous ancestors of the given caller g and |
| // includes info for the current caller into a new set of tracebacks for
| // a g being created. |
| func saveAncestors(callergp *g) *[]ancestorInfo { |
| // Copy all prior info, except for the root goroutine (goid 0). |
| if debug.tracebackancestors <= 0 || callergp.goid == 0 { |
| return nil |
| } |
| var callerAncestors []ancestorInfo |
| if callergp.ancestors != nil { |
| callerAncestors = *callergp.ancestors |
| } |
| n := int32(len(callerAncestors)) + 1 |
| if n > debug.tracebackancestors { |
| n = debug.tracebackancestors |
| } |
| ancestors := make([]ancestorInfo, n) |
| copy(ancestors[1:], callerAncestors) |
| |
| var pcs [_TracebackMaxFrames]uintptr |
| // FIXME: This should get a traceback of callergp. |
| // npcs := gcallers(callergp, 0, pcs[:]) |
| npcs := 0 |
| ipcs := make([]uintptr, npcs) |
| copy(ipcs, pcs[:]) |
| ancestors[0] = ancestorInfo{ |
| pcs: ipcs, |
| goid: callergp.goid, |
| gopc: callergp.gopc, |
| } |
| |
| ancestorsp := new([]ancestorInfo) |
| *ancestorsp = ancestors |
| return ancestorsp |
| } |
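| 
| // Ancestor tracking is controlled by the tracebackancestors GODEBUG setting,
| // for example:
| //
| //	GODEBUG=tracebackancestors=5 ./prog
| //
| // which records the creation tracebacks of up to 5 ancestor goroutines and
| // includes them when a goroutine's stack is printed (e.g. on panic).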
| |
| // Put on gfree list. |
| // If local list is too long, transfer a batch to the global list. |
| func gfput(_p_ *p, gp *g) { |
| if readgstatus(gp) != _Gdead { |
| throw("gfput: bad status (not Gdead)") |
| } |
| |
| _p_.gFree.push(gp) |
| _p_.gFree.n++ |
| if _p_.gFree.n >= 64 { |
| lock(&sched.gFree.lock) |
| for _p_.gFree.n >= 32 { |
| _p_.gFree.n-- |
| gp = _p_.gFree.pop() |
| sched.gFree.list.push(gp) |
| sched.gFree.n++ |
| } |
| unlock(&sched.gFree.lock) |
| } |
| } |
| |
| // Get from gfree list. |
| // If local list is empty, grab a batch from global list. |
| func gfget(_p_ *p) *g { |
| retry: |
| if _p_.gFree.empty() && !sched.gFree.list.empty() { |
| lock(&sched.gFree.lock) |
| // Move a batch of free Gs to the P. |
| for _p_.gFree.n < 32 { |
| gp := sched.gFree.list.pop() |
| if gp == nil { |
| break |
| } |
| sched.gFree.n-- |
| _p_.gFree.push(gp) |
| _p_.gFree.n++ |
| } |
| unlock(&sched.gFree.lock) |
| goto retry |
| } |
| gp := _p_.gFree.pop() |
| if gp == nil { |
| return nil |
| } |
| _p_.gFree.n-- |
| return gp |
| } |
| |
| // Purge all cached G's from gfree list to the global list. |
| func gfpurge(_p_ *p) { |
| lock(&sched.gFree.lock) |
| for !_p_.gFree.empty() { |
| gp := _p_.gFree.pop() |
| _p_.gFree.n-- |
| sched.gFree.list.push(gp) |
| sched.gFree.n++ |
| } |
| unlock(&sched.gFree.lock) |
| } |
| |
| // Breakpoint executes a breakpoint trap. |
| func Breakpoint() { |
| breakpoint() |
| } |
| |
| // dolockOSThread is called by LockOSThread and lockOSThread below |
| // after they modify m.locked. Do not allow preemption during this call, |
| // or else the m might be different in this function than in the caller. |
| //go:nosplit |
| func dolockOSThread() { |
| if GOARCH == "wasm" { |
| return // no threads on wasm yet |
| } |
| _g_ := getg() |
| _g_.m.lockedg.set(_g_) |
| _g_.lockedm.set(_g_.m) |
| } |
| |
| //go:nosplit |
| |
| // LockOSThread wires the calling goroutine to its current operating system thread. |
| // The calling goroutine will always execute in that thread, |
| // and no other goroutine will execute in it, |
| // until the calling goroutine has made as many calls to |
| // UnlockOSThread as to LockOSThread. |
| // If the calling goroutine exits without unlocking the thread, |
| // the thread will be terminated. |
| // |
| // All init functions are run on the startup thread. Calling LockOSThread |
| // from an init function will cause the main function to be invoked on |
| // that thread. |
| // |
| // A goroutine should call LockOSThread before calling OS services or |
| // non-Go library functions that depend on per-thread state. |
| func LockOSThread() { |
| if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" { |
| // If we need to start a new thread from the locked |
| // thread, we need the template thread. Start it now |
| // while we're in a known-good state. |
| startTemplateThread() |
| } |
| _g_ := getg() |
| _g_.m.lockedExt++ |
| if _g_.m.lockedExt == 0 { |
| _g_.m.lockedExt-- |
| panic("LockOSThread nesting overflow") |
| } |
| dolockOSThread() |
| } |
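| 
| // A typical use, sketched: wire a goroutine to a single OS thread before
| // calling into a C library that keeps per-thread state (the C functions
| // below are hypothetical):
| //
| //	func renderLoop(jobs <-chan int) {
| //		runtime.LockOSThread()
| //		defer runtime.UnlockOSThread()
| //		C.initThreadLocalContext() // hypothetical per-thread C state
| //		for j := range jobs {
| //			C.render(C.int(j)) // must keep running on the same thread
| //		}
| //	}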
| |
| //go:nosplit |
| func lockOSThread() { |
| getg().m.lockedInt++ |
| dolockOSThread() |
| } |
| |
| // dounlockOSThread is called by UnlockOSThread and unlockOSThread below |
| // after they update m->locked. Do not allow preemption during this call, |
| // or else the m might be different in this function than in the caller.
| //go:nosplit |
| func dounlockOSThread() { |
| if GOARCH == "wasm" { |
| return // no threads on wasm yet |
| } |
| _g_ := getg() |
| if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { |
| return |
| } |
| _g_.m.lockedg = 0 |
| _g_.lockedm = 0 |
| } |
| |
| //go:nosplit |
| |
| // UnlockOSThread undoes an earlier call to LockOSThread. |
| // If this drops the number of active LockOSThread calls on the |
| // calling goroutine to zero, it unwires the calling goroutine from |
| // its fixed operating system thread. |
| // If there are no active LockOSThread calls, this is a no-op. |
| // |
| // Before calling UnlockOSThread, the caller must ensure that the OS |
| // thread is suitable for running other goroutines. If the caller made |
| // any permanent changes to the state of the thread that would affect |
| // other goroutines, it should not call this function and thus leave |
| // the goroutine locked to the OS thread until the goroutine (and |
| // hence the thread) exits. |
| func UnlockOSThread() { |
| _g_ := getg() |
| if _g_.m.lockedExt == 0 { |
| return |
| } |
| _g_.m.lockedExt-- |
| dounlockOSThread() |
| } |
| |
| //go:nosplit |
| func unlockOSThread() { |
| _g_ := getg() |
| if _g_.m.lockedInt == 0 { |
| systemstack(badunlockosthread) |
| } |
| _g_.m.lockedInt-- |
| dounlockOSThread() |
| } |
| |
| func badunlockosthread() { |
| throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") |
| } |
| |
| func gcount() int32 { |
| n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) |
| for _, _p_ := range allp { |
| n -= _p_.gFree.n |
| } |
| |
| // All these variables can be changed concurrently, so the result can be inconsistent. |
| // But at least the current goroutine is running. |
| if n < 1 { |
| n = 1 |
| } |
| return n |
| } |
| |
| func mcount() int32 { |
| return int32(sched.mnext - sched.nmfreed) |
| } |
| |
| var prof struct { |
| signalLock uint32 |
| hz int32 |
| } |
| |
| func _System() { _System() } |
| func _ExternalCode() { _ExternalCode() } |
| func _LostExternalCode() { _LostExternalCode() } |
| func _GC() { _GC() } |
| func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } |
| func _VDSO() { _VDSO() } |
| |
| var _SystemPC = funcPC(_System) |
| var _ExternalCodePC = funcPC(_ExternalCode) |
| var _LostExternalCodePC = funcPC(_LostExternalCode) |
| var _GCPC = funcPC(_GC) |
| var _LostSIGPROFDuringAtomic64PC = funcPC(_LostSIGPROFDuringAtomic64) |
| |
| // Called if we receive a SIGPROF signal. |
| // Called by the signal handler, may run during STW. |
| //go:nowritebarrierrec |
| func sigprof(pc uintptr, gp *g, mp *m) { |
| if prof.hz == 0 { |
| return |
| } |
| |
| // If mp.profilehz is 0, then profiling is not enabled for this thread. |
| // We must check this to avoid a deadlock between setcpuprofilerate |
| // and the call to cpuprof.add, below. |
| if mp != nil && mp.profilehz == 0 { |
| return |
| } |
| // Profiling runs concurrently with GC, so it must not allocate. |
| // Set a trap in case the code does allocate. |
| // Note that on Windows, one thread takes profiles of all the
| // other threads, so mp is usually not getg().m. |
| // In fact mp may not even be stopped. |
| // See golang.org/issue/17165. |
| getg().m.mallocing++ |
| |
| // Define that a "user g" is a user-created goroutine, and a "system g" |
| // is one that is m->g0 or m->gsignal. |
| // |
| // We might be interrupted for profiling halfway through a |
| // goroutine switch. The switch involves updating three (or four) values: |
| // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, |
| // because once it gets updated the new g is running. |
| // |
| // When switching from a user g to a system g, LR is not considered live, |
| // so the update only affects g, SP, and PC. Since PC must be last, the
| // possible partial transitions in ordinary execution are (1) g alone is updated,
| // (2) both g and SP are updated, and (3) SP alone is updated.
| // If SP or g alone is updated, we can detect the partial transition by checking |
| // whether the SP is within g's stack bounds. (We could also require that SP |
| // be changed only after g, but the stack bounds check is needed by other |
| // cases, so there is no need to impose an additional requirement.) |
| // |
| // There is one exceptional transition to a system g, not in ordinary execution. |
| // When a signal arrives, the operating system starts the signal handler running |
| // with an updated PC and SP. The g is updated last, at the beginning of the |
| // handler. There are two reasons this is okay. First, until g is updated the |
| // g and SP do not match, so the stack bounds check detects the partial transition. |
| // Second, signal handlers currently run with signals disabled, so a profiling |
| // signal cannot arrive during the handler. |
| // |
| // When switching from a system g to a user g, there are three possibilities. |
| // |
| // First, it may be that the g switch has no PC update, because the SP |
| // either corresponds to a user g throughout (as in asmcgocall) |
| // or because it has been arranged to look like a user g frame |
| // (as in cgocallback). In this case, since the entire |
| // transition is a g+SP update, a partial transition updating just one of |
| // those will be detected by the stack bounds check. |
| // |
| // Second, when returning from a signal handler, the PC and SP updates |
| // are performed by the operating system in an atomic update, so the g |
| // update must be done before them. The stack bounds check detects |
| // the partial transition here, and (again) signal handlers run with signals |
| // disabled, so a profiling signal cannot arrive then anyway. |
| // |
| // Third, the common case: it may be that the switch updates g, SP, and PC |
| // separately. If the PC is within any of the functions that do this,
| // we don't ask for a traceback. See the function setsSP for more about this.
| // |
| // There is another apparently viable approach, recorded here in case |
| // the "PC within setsSP function" check turns out not to be usable. |
| // It would be possible to delay the update of either g or SP until immediately |
| // before the PC update instruction. Then, because of the stack bounds check, |
| // the only problematic interrupt point is just before that PC update instruction, |
| // and the sigprof handler can detect that instruction and simulate stepping past |
| // it in order to reach a consistent state. On ARM, the update of g must be made |
| // in two places (in R10 and also in a TLS slot), so the delayed update would |
| // need to be the SP update. The sigprof handler must read the instruction at |
| // the current PC and if it was the known instruction (for example, JMP BX or |
| // MOV R2, PC), use that other register in place of the PC value. |
| // The biggest drawback to this solution is that it requires that we can tell |
| // whether it's safe to read from the memory pointed at by PC. |
| // In a correct program, we can test PC == nil and otherwise read, |
| // but if a profiling signal happens at the instant that a program executes |
| // a bad jump (before the program manages to handle the resulting fault) |
| // the profiling handler could fault trying to read nonexistent memory. |
| // |
| // To recap, there are no constraints on the assembly being used for the |
| // transition. We simply require that g and SP match and that the PC is not |
| // in gogo. |
| traceback := true |
| |
| // If SIGPROF arrived while we were already fetching runtime callers,
| // we can have trouble on older systems because the unwind
| // library calls dl_iterate_phdr, which was not reentrant in
| // the past. alreadyInCallers checks for that.
| if gp == nil || alreadyInCallers() { |
| traceback = false |
| } |
| |
| var stk [maxCPUProfStack]uintptr |
| n := 0 |
| if traceback { |
| var stklocs [maxCPUProfStack]location |
| n = callers(0, stklocs[:]) |
| |
| // Issue 26595: the stack trace we've just collected is going |
| // to include frames that we don't want to report in the CPU |
| // profile, including signal handler frames. Here is what we |
| // might typically see at the point of "callers" above for a |
| // signal delivered to the application routine "interesting" |
| // called by "main". |
| // |
| // 0: runtime.sigprof |
| // 1: runtime.sighandler |
| // 2: runtime.sigtrampgo |
| // 3: runtime.sigtramp |
| // 4: <signal handler called> |
| // 5: main.interesting_routine |
| // 6: main.main |
| // |
| // To ensure a sane profile, walk through the frames in |
| // "stklocs" until we find the "runtime.sigtramp" frame, then |
| // report only those frames below the frame one down from |
| // that. On systems that don't split stack, "sigtramp" can |
| // do a sibling call to "sigtrampgo", so use "sigtrampgo" |
| // if we don't find "sigtramp". If for some reason |
| // neither "runtime.sigtramp" nor "runtime.sigtrampgo" is |
| // present, don't make any changes. |
| framesToDiscard := 0 |
| for i := 0; i < n; i++ { |
| if stklocs[i].function == "runtime.sigtrampgo" && i+2 < n { |
| framesToDiscard = i + 2 |
| } |
| if stklocs[i].function == "runtime.sigtramp" && i+2 < n { |
| framesToDiscard = i + 2 |
| break |
| } |
| } |
| n -= framesToDiscard |
| for i := 0; i < n; i++ { |
| stk[i] = stklocs[i+framesToDiscard].pc |
| } |
| } |
| |
| if n <= 0 { |
| // Normal traceback is impossible or has failed. |
| // Account it against abstract "System" or "GC". |
| n = 2 |
| stk[0] = pc |
| if mp.preemptoff != "" { |
| stk[1] = _GCPC + sys.PCQuantum |
| } else { |
| stk[1] = _SystemPC + sys.PCQuantum |
| } |
| } |
| |
| if prof.hz != 0 { |
| cpuprof.add(gp, stk[:n]) |
| } |
| getg().m.mallocing-- |
| } |
| |
| // Use global arrays rather than using up lots of stack space in the |
| // signal handler. This is safe since, while we are executing a SIGPROF
| // signal, other SIGPROF signals are blocked.
| var nonprofGoStklocs [maxCPUProfStack]location |
| var nonprofGoStk [maxCPUProfStack]uintptr |
| |
| // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread.
| // It collects a stack trace itself via callers, using the global arrays above.
| // g is nil, and what we can do is very limited. |
| //go:nosplit |
| //go:nowritebarrierrec |
| func sigprofNonGo(pc uintptr) { |
| if prof.hz != 0 { |
| n := callers(0, nonprofGoStklocs[:]) |
| |
| for i := 0; i < n; i++ { |
| nonprofGoStk[i] = nonprofGoStklocs[i].pc |
| } |
| |
| if n <= 0 { |
| n = 2 |
| nonprofGoStk[0] = pc |
| nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum |
| } |
| |
| cpuprof.addNonGo(nonprofGoStk[:n]) |
| } |
| } |
| |
| // sigprofNonGoPC is called when a profiling signal arrived on a |
| // non-Go thread and we have a single PC value, not a stack trace. |
| // g is nil, and what we can do is very limited. |
| //go:nosplit |
| //go:nowritebarrierrec |
| func sigprofNonGoPC(pc uintptr) { |
| if prof.hz != 0 { |
| stk := []uintptr{ |
| pc, |
| _ExternalCodePC + sys.PCQuantum, |
| } |
| cpuprof.addNonGo(stk) |
| } |
| } |
| |
| // setcpuprofilerate sets the CPU profiling rate to hz times per second. |
| // If hz <= 0, setcpuprofilerate turns off CPU profiling. |
| func setcpuprofilerate(hz int32) { |
| // Force sane arguments. |
| if hz < 0 { |
| hz = 0 |
| } |
| |
| // Disable preemption, otherwise we can be rescheduled to another thread |
| // that has profiling enabled. |
| _g_ := getg() |
| _g_.m.locks++ |
| |
| // Stop profiler on this thread so that it is safe to lock prof. |
| // If a profiling signal came in while we had prof locked,
| // it would deadlock. |
| setThreadCPUProfiler(0) |
| |
| for !atomic.Cas(&prof.signalLock, 0, 1) { |
| osyield() |
| } |
| if prof.hz != hz { |
| setProcessCPUProfiler(hz) |
| prof.hz = hz |
| } |
| atomic.Store(&prof.signalLock, 0) |
| |
| lock(&sched.lock) |
| sched.profilehz = hz |
| unlock(&sched.lock) |
| |
| if hz != 0 { |
| setThreadCPUProfiler(hz) |
| } |
| |
| _g_.m.locks-- |
| } |
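| 
| // User code normally reaches this through runtime.SetCPUProfileRate or, more
| // commonly, runtime/pprof. A sketch:
| //
| //	f, err := os.Create("cpu.prof")
| //	if err != nil {
| //		log.Fatal(err)
| //	}
| //	pprof.StartCPUProfile(f) // calls SetCPUProfileRate(100), which ends up here
| //	defer pprof.StopCPUProfile()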
| |
| // init initializes pp, which may be a freshly allocated p or a |
| // previously destroyed p, and transitions it to status _Pgcstop. |
| func (pp *p) init(id int32) { |
| pp.id = id |
| pp.status = _Pgcstop |
| pp.sudogcache = pp.sudogbuf[:0] |
| pp.deferpool = pp.deferpoolbuf[:0] |
| pp.wbBuf.reset() |
| if pp.mcache == nil { |
| if id == 0 { |
| if mcache0 == nil { |
| throw("missing mcache?") |
| } |
| // Use the bootstrap mcache0. Only one P will get |
| // mcache0: the one with ID 0. |
| pp.mcache = mcache0 |
| } else { |
| pp.mcache = allocmcache() |
| } |
| } |
| if raceenabled && pp.raceprocctx == 0 { |
| if id == 0 { |
| pp.raceprocctx = raceprocctx0 |
| raceprocctx0 = 0 // bootstrap |
| } else { |
| pp
|