// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "defs.h"
#include "malloc.h"
#include "os.h"
// Unwind g's stack down to (but not past) sp; defined later in this file.
static void unwindstack(G*, byte*);

typedef struct Sched Sched;

M m0;            // the bootstrap machine (OS thread)
G g0;            // idle goroutine for m0

static int32 debug = 0;

// Set while the garbage collector has stopped (or is stopping) the world.
int32 gcwaiting;
// Go scheduler
//
// The go scheduler's job is to match ready-to-run goroutines (`g's)
// with waiting-for-work schedulers (`m's).  If there are ready gs
// and no waiting ms, ready() will start a new m running in a new
// OS thread, so that all ready gs can run simultaneously, up to a limit.
// For now, ms never go away.
//
// By default, Go keeps only one kernel thread (m) running user code
// at a single time; other threads may be blocked in the operating system.
// Setting the environment variable $GOMAXPROCS or calling
// runtime.GOMAXPROCS() will change the number of user threads
// allowed to execute simultaneously.  $GOMAXPROCS is thus an
// approximation of the maximum number of cores to use.
//
// Even a program that can run without deadlock in a single process
// might use more ms if given the chance.  For example, the prime
// sieve will use as many ms as there are primes (up to sched.mmax),
// allowing different stages of the pipeline to execute in parallel.
// We could revisit this choice, only kicking off new ms for blocking
// system calls, but that would limit the amount of parallel computation
// that go would try to do.
//
// In general, one could imagine all sorts of refinements to the
// scheduler, but the goal now is just to get something working on
// Linux and OS X.
// Global scheduler state.  All fields are protected by the embedded Lock.
struct Sched {
	Lock;

	G *gfree;	// available gs (status == Gdead)

	G *ghead;	// gs waiting to run
	G *gtail;
	int32 gwait;	// number of gs waiting to run
	int32 gcount;	// number of gs that are alive

	M *mhead;	// ms waiting for work
	int32 mwait;	// number of ms waiting for work
	int32 mcount;	// number of ms that have been created
	int32 mcpu;	// number of ms executing on cpu
	int32 mcpumax;	// max number of ms allowed on cpu
	int32 gomaxprocs;
	int32 msyscall;	// number of ms in system calls

	int32 predawn;	// running initialization, don't run new gs.

	Note stopped;	// one g can wait here for ms to stop
	int32 waitstop;	// after setting this flag
};

Sched sched;
// Scheduling helpers.  Sched must be locked.
static void gput(G*);	// put/get on ghead/gtail
static G* gget(void);
static void mput(M*);	// put/get on mhead
static M* mget(G*);
static void gfput(G*);	// put/get on gfree
static G* gfget(void);
static void matchmg(void);	// match ms to gs
static void readylocked(G*);	// ready, but sched is locked
static void mnextg(M*, G*);

// Scheduler loop.
static void scheduler(void);
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call mstart
//
// The new G does:
//
//	call main·init_function
//	call initdone
//	call main·main
// One-time scheduler initialization, run on m0 before any goroutine
// exists.  Sets up the allocator, reads $GOMAXPROCS, and marks the
// runtime as pre-dawn so ready() won't start new ms yet.
void
schedinit(void)
{
	int32 n;
	byte *p;

	allm = m;
	// Suppress memory profiling while bootstrapping the allocator.
	m->nomemprof++;

	mallocinit();
	goargs();

	// For debugging:
	// Allocate internal symbol table representation now,
	// so that we don't need to call malloc when we crash.
	// findfunc(0);

	sched.gomaxprocs = 1;
	p = getenv("GOMAXPROCS");
	if(p != nil && (n = atoi(p)) != 0)
		sched.gomaxprocs = n;
	sched.mcpumax = sched.gomaxprocs;
	sched.mcount = 1;	// m0 counts as the first m
	sched.predawn = 1;	// cleared later by initdone()

	m->nomemprof--;
}
127
// Called after main·init_function; main·main will be called on return.
// Ends the pre-dawn phase: enables the garbage collector and starts
// ms for any goroutines queued during initialization.
void
initdone(void)
{
	// Let's go.
	sched.predawn = 0;
	mstats.enablegc = 1;

	// If main·init_function started other goroutines,
	// kick off new ms to handle them, like ready
	// would have, had it not been pre-dawn.
	lock(&sched);
	matchmg();
	unlock(&sched);
}
// Finish execution of the current goroutine: mark it moribund and
// reenter the scheduler, which will reclaim it (see scheduler()).
void
goexit(void)
{
	g->status = Gmoribund;
	gosched();
}
150
// Print a stack trace for every live goroutine except `me'.
void
tracebackothers(G *me)
{
	G *g;	// NOTE: deliberately shadows the global g

	for(g = allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		printf("\ngoroutine %d [%d]:\n", g->goid, g->status);
		traceback(g->sched.pc, g->sched.sp, 0, g);
	}
}
163
Russ Cox96824002008-08-05 14:18:47 -0700164// Put on `g' queue. Sched must be locked.
Russ Coxd28acc42008-08-04 16:43:49 -0700165static void
Russ Cox96824002008-08-05 14:18:47 -0700166gput(G *g)
Russ Coxd28acc42008-08-04 16:43:49 -0700167{
Russ Cox218c3932009-07-13 17:28:39 -0700168 M *m;
169
170 // If g is wired, hand it off directly.
171 if((m = g->lockedm) != nil) {
172 mnextg(m, g);
173 return;
174 }
175
Russ Cox96824002008-08-05 14:18:47 -0700176 g->schedlink = nil;
177 if(sched.ghead == nil)
178 sched.ghead = g;
Russ Coxd28acc42008-08-04 16:43:49 -0700179 else
Russ Cox96824002008-08-05 14:18:47 -0700180 sched.gtail->schedlink = g;
181 sched.gtail = g;
182 sched.gwait++;
Russ Coxd28acc42008-08-04 16:43:49 -0700183}
184
Russ Cox96824002008-08-05 14:18:47 -0700185// Get from `g' queue. Sched must be locked.
186static G*
187gget(void)
188{
189 G *g;
Russ Coxf7f63292008-08-05 14:21:42 -0700190
Russ Cox96824002008-08-05 14:18:47 -0700191 g = sched.ghead;
192 if(g){
193 sched.ghead = g->schedlink;
194 if(sched.ghead == nil)
195 sched.gtail = nil;
196 sched.gwait--;
197 }
198 return g;
199}
200
// Put on `m' list.  Sched must be locked.
// Pushes m onto the head of the idle-m stack.
static void
mput(M *m)
{
	m->schedlink = sched.mhead;
	sched.mhead = m;
	sched.mwait++;
}
209
Russ Cox218c3932009-07-13 17:28:39 -0700210// Get an `m' to run `g'. Sched must be locked.
Russ Cox96824002008-08-05 14:18:47 -0700211static M*
Russ Cox218c3932009-07-13 17:28:39 -0700212mget(G *g)
Russ Cox96824002008-08-05 14:18:47 -0700213{
214 M *m;
Russ Coxf7f63292008-08-05 14:21:42 -0700215
Russ Cox218c3932009-07-13 17:28:39 -0700216 // if g has its own m, use it.
217 if((m = g->lockedm) != nil)
218 return m;
219
220 // otherwise use general m pool.
221 if((m = sched.mhead) != nil){
Russ Cox96824002008-08-05 14:18:47 -0700222 sched.mhead = m->schedlink;
223 sched.mwait--;
224 }
225 return m;
226}
227
// Mark g ready to run.  Locking wrapper around readylocked.
void
ready(G *g)
{
	lock(&sched);
	readylocked(g);
	unlock(&sched);
}
236
// Mark g ready to run.  Sched is already locked.
// G might be running already and about to stop.
// The sched lock protects g->status from changing underfoot.
static void
readylocked(G *g)
{
	if(g->m){
		// Running on another machine.
		// Ready it when it stops.
		g->readyonstop = 1;
		return;
	}

	// Mark runnable.
	if(g->status == Grunnable || g->status == Grunning)
		throw("bad g->status in ready");
	g->status = Grunnable;

	// Queue it and, unless still initializing, start ms to run it.
	gput(g);
	if(!sched.predawn)
		matchmg();
}
259
// Does nothing; called from newprocreadylocked to keep 6l from
// inlining that function away.
static void
nop(void)
{
}
264
// Same as readylocked but a different symbol so that
// debuggers can set a breakpoint here and catch all
// new goroutines.
static void
newprocreadylocked(G *g)
{
	nop();	// avoid inlining in 6l
	readylocked(g);
}
274
// Pass g to m for running.  Sched must be locked.
// Accounts the cpu slot (sched.mcpu++) on behalf of the receiving m,
// and wakes the m if it is blocked waiting for its next g.
static void
mnextg(M *m, G *g)
{
	sched.mcpu++;
	m->nextg = g;
	if(m->waitnextg) {
		m->waitnextg = 0;
		notewakeup(&m->havenextg);
	}
}
286
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS gs are
// running on cpus (not in system calls) at any given time.
// Blocks on m->havenextg if no work is immediately available.
static G*
nextgandunlock(void)
{
	G *gp;

	if(sched.mcpu < 0)
		throw("negative sched.mcpu");

	// If there is a g waiting as m->nextg,
	// mnextg took care of the sched.mcpu++.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		unlock(&sched);
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(sched.gwait != 0)
			matchmg();
	} else {
		// Look for work on global queue.
		while(sched.mcpu < sched.mcpumax && (gp=gget()) != nil) {
			if(gp->lockedm) {
				// Wired g: hand off to its m and keep looking.
				mnextg(gp->lockedm, gp);
				continue;
			}
			sched.mcpu++;	// this m will run gp
			unlock(&sched);
			return gp;
		}
		// Otherwise, wait on global m queue.
		mput(m);
	}
	// No m is on cpu and none is in a syscall: nothing can ever run.
	if(sched.mcpu == 0 && sched.msyscall == 0)
		throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	noteclear(&m->havenextg);
	// If stoptheworld is waiting and we are now within the cpu cap,
	// let it proceed.
	if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
		sched.waitstop = 0;
		notewakeup(&sched.stopped);
	}
	unlock(&sched);

	// Sleep until mnextg hands us a g.
	notesleep(&m->havenextg);
	if((gp = m->nextg) == nil)
		throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
345
// TODO(rsc): Remove.  This is only temporary,
// for the mark and sweep collector.
// Force all ms except this one off cpu by capping mcpumax at 1
// and sleeping until the count drains.
void
stoptheworld(void)
{
	lock(&sched);
	gcwaiting = 1;
	sched.mcpumax = 1;
	while(sched.mcpu > 1) {
		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection,
		// so this is okay.
		noteclear(&sched.stopped);
		sched.waitstop = 1;
		unlock(&sched);
		notesleep(&sched.stopped);
		lock(&sched);
	}
	unlock(&sched);
}
367
// TODO(rsc): Remove.  This is only temporary,
// for the mark and sweep collector.
// Undo stoptheworld: restore the cpu cap and restart ms.
void
starttheworld(void)
{
	lock(&sched);
	gcwaiting = 0;
	sched.mcpumax = sched.gomaxprocs;
	matchmg();
	unlock(&sched);
}
379
// Called to start an M.  Runs on the m's scheduling goroutine (g0);
// sets up the per-m allocation cache and enters the scheduler loop.
// Does not return.
void
mstart(void)
{
	if(g != m->g0)
		throw("bad mstart");
	if(m->mcache == nil)
		m->mcache = allocmcache();
	minit();
	scheduler();
}
391
// When running with cgo, we call libcgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
void (*libcgo_thread_start)(void*);

// Arguments passed to libcgo_thread_start (see matchmg).
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;
	G *g;
	void (*fn)(void);
};
404
// Kick off new ms as needed (up to mcpumax), pairing each queued g
// with an idle or freshly created m.  Sched is locked.
static void
matchmg(void)
{
	G *g;	// NOTE: shadows the global g

	// Don't start threads from within malloc or the collector.
	if(m->mallocing || m->gcing)
		return;
	while(sched.mcpu < sched.mcpumax && (g = gget()) != nil){
		M *m;	// NOTE: shadows the global m

		// Find the m that will run g.
		if((m = mget(g)) == nil){
			m = malloc(sizeof(M));
			// Add to allm so garbage collector doesn't free m
			// when it is just in a register (R14 on amd64).
			m->alllink = allm;
			allm = m;
			m->id = sched.mcount++;

			if(libcgo_thread_start != nil) {
				CgoThreadStart ts;
				// pthread_create will make us a stack.
				m->g0 = malg(-1);
				ts.m = m;
				ts.g = m->g0;
				ts.fn = mstart;
				runcgo(libcgo_thread_start, &ts);
			} else {
				m->g0 = malg(8192);
				newosproc(m, m->g0, m->g0->stackbase, mstart);
			}
		}
		mnextg(m, g);
	}
}
444
// Scheduler loop: find g to run, run it, repeat.
// Runs on the m's g0.  Goroutines jump back here via gosched()
// (gogo into m->sched), which makes the gosave below return nonzero.
static void
scheduler(void)
{
	G* gp;

	lock(&sched);
	if(gosave(&m->sched) != 0){
		// A goroutine just stopped running; gp is that goroutine.
		gp = m->curg;
		if(gp->status == Grecovery) {
			// switched to scheduler to get stack unwound.
			// don't go through the full scheduling logic.
			Defer *d;

			d = gp->defer;
			gp->defer = d->link;

			// unwind to the stack frame with d->sp in it.
			unwindstack(gp, d->sp);
			if(d->sp < gp->stackguard || gp->stackbase < d->sp)
				throw("bad stack in recovery");

			// make the deferproc for this d return again,
			// this time returning 1.  function will jump to
			// standard return epilogue.
			// the -2*sizeof(uintptr) makes up for the
			// two extra words that are on the stack at
			// each call to deferproc.
			// (the pc we're returning to does pop pop
			// before it tests the return value.)
			gp->sched.sp = getcallersp(d->sp - 2*sizeof(uintptr));
			gp->sched.pc = d->pc;
			free(d);
			gogo(&gp->sched, 1);
		}

		// Jumped here via gosave/gogo, so didn't
		// execute lock(&sched) above.
		lock(&sched);

		if(sched.predawn)
			throw("init sleeping");

		// Just finished running gp.
		gp->m = nil;
		sched.mcpu--;

		if(sched.mcpu < 0)
			throw("sched.mcpu < 0 in scheduler");
		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			throw("bad gp->status in sched");
		case Grunning:
			// Voluntary yield: requeue at the tail.
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			// Goroutine exited (see goexit); reclaim it.
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			unwindstack(gp, nil);
			gfput(gp);
			if(--sched.gcount == 0)
				exit(0);
			break;
		}
		if(gp->readyonstop){
			// Someone called ready() while gp was still running.
			gp->readyonstop = 0;
			readylocked(gp);
		}
	}

	// Find (or wait for) g to run.  Unlocks sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;
	if(gp->sched.pc == (byte*)goexit)	// kickoff
		gogocall(&gp->sched, (void(*)(void))gp->entry);
	gogo(&gp->sched, 1);
}
531
Russ Cox96824002008-08-05 14:18:47 -0700532// Enter scheduler. If g->status is Grunning,
533// re-queues g and runs everyone else who is waiting
534// before running g again. If g->status is Gmoribund,
535// kills off g.
Ken Thompsonaf58f172008-07-14 14:34:27 -0700536void
Russ Cox918afd942009-05-08 15:21:41 -0700537gosched(void)
Ken Thompsonaf58f172008-07-14 14:34:27 -0700538{
Russ Cox71108812010-01-12 10:03:02 -0800539 if(m->locks != 0)
540 throw("gosched holding locks");
Russ Cox74a9fc12010-01-06 19:24:11 -0800541 if(g == m->g0)
Russ Cox1ce17912009-01-26 17:37:05 -0800542 throw("gosched of g0");
Russ Cox7343e032009-06-17 15:12:16 -0700543 if(gosave(&g->sched) == 0)
544 gogo(&m->sched, 1);
Ken Thompsonaf58f172008-07-14 14:34:27 -0700545}
546
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
·entersyscall(void)
{
	lock(&sched);
	if(sched.predawn) {
		unlock(&sched);
		return;
	}
	g->status = Gsyscall;
	// Leave SP around for gc and traceback.
	// Do before notewakeup so that gc
	// never sees Gsyscall with wrong stack.
	gosave(&g->sched);
	sched.mcpu--;
	sched.msyscall++;
	// A cpu slot opened up; start another m if work is queued.
	if(sched.gwait != 0)
		matchmg();
	if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
		sched.waitstop = 0;
		notewakeup(&sched.stopped);
	}
	unlock(&sched);
}
574
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
·exitsyscall(void)
{
	lock(&sched);
	if(sched.predawn) {
		unlock(&sched);
		return;
	}
	sched.msyscall--;
	sched.mcpu++;
	// Fast path - if there's room for this m, we're done.
	if(sched.mcpu <= sched.mcpumax) {
		g->status = Grunning;
		unlock(&sched);
		return;
	}
	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	g->readyonstop = 1;
	unlock(&sched);

	// Slow path - all the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the sched.mcpu++ above.
	gosched();
}
608
/*
 * stack layout parameters.
 * known to linkers.
 *
 * g->stackguard is set to point StackGuard bytes
 * above the bottom of the stack.  each function
 * compares its stack pointer against g->stackguard
 * to check for overflow.  to cut one instruction from
 * the check sequence for functions with tiny frames,
 * the stack is allowed to protrude StackSmall bytes
 * below the stack guard.  functions with large frames
 * don't bother with the check and always call morestack.
 * the sequences are:
 *
 *	guard = g->stackguard
 *	frame = function's stack frame size
 *	argsize = size of function arguments (call + return)
 *
 *	stack frame size <= StackSmall:
 *		CMPQ guard, SP
 *		JHI 3(PC)
 *		MOVQ m->morearg, $(argsize << 32)
 *		CALL sys.morestack(SB)
 *
 *	stack frame size > StackSmall but < StackBig
 *		LEAQ (frame-StackSmall)(SP), R0
 *		CMPQ guard, R0
 *		JHI 3(PC)
 *		MOVQ m->morearg, $(argsize << 32)
 *		CALL sys.morestack(SB)
 *
 *	stack frame size >= StackBig:
 *		MOVQ m->morearg, $((argsize << 32) | frame)
 *		CALL sys.morestack(SB)
 *
 * the bottom StackGuard - StackSmall bytes are important:
 * there has to be enough room to execute functions that
 * refuse to check for stack overflow, either because they
 * need to be adjacent to the actual caller's frame (sys.deferproc)
 * or because they handle the imminent stack overflow (sys.morestack).
 *
 * for example, sys.deferproc might call malloc,
 * which does one of the above checks (without allocating a full frame),
 * which might trigger a call to sys.morestack.
 * this sequence needs to fit in the bottom section of the stack.
 * on amd64, sys.morestack's frame is 40 bytes, and
 * sys.deferproc's frame is 56 bytes.  that fits well within
 * the StackGuard - StackSmall = 128 bytes at the bottom.
 * there may be other sequences lurking or yet to be written
 * that require more stack.  sys.morestack checks to make sure
 * the stack has not completely overflowed and should
 * catch such sequences.
 */
enum
{
	// byte offset of stack guard (g->stackguard) above bottom of stack.
	StackGuard = 256,

	// checked frames are allowed to protrude below the guard by
	// this many bytes.  this saves an instruction in the checking
	// sequence when the stack frame is tiny.
	StackSmall = 128,

	// extra space in the frame (beyond the function for which
	// the frame is allocated) is assumed not to be much bigger
	// than this amount.  it may not be used efficiently if it is.
	StackBig = 4096,
};
Ken Thompsonaf58f172008-07-14 14:34:27 -0700677
// Release the current (topmost) stack segment of m->curg and resume
// execution in the caller's segment, copying back any return
// arguments that were passed across the split.
void
oldstack(void)
{
	Stktop *top, old;
	uint32 args;
	byte *sp;
	G *g1;
	static int32 goid;

//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	// Copy Stktop out before freeing the segment it lives in.
	old = *top;
	args = old.args;
	if(args > 0) {
		// Copy return arguments back to the caller's frame.
		sp -= args;
		mcpy(top->fp, sp, args);
	}
	goid = old.gobuf.g->goid;	// fault if g is bad, before gogo

	if(old.free)
		stackfree(g1->stackguard - StackGuard);
	g1->stackbase = old.stackbase;
	g1->stackguard = old.stackguard;

	gogo(&old.gobuf, m->cret);
}
707
// Allocate (or reuse) a stack segment big enough for the call that
// triggered morestack, link it to the old segment via a Stktop,
// copy the arguments across, and restart the call.  Never returns.
void
newstack(void)
{
	int32 frame, args;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool free;

	frame = m->moreframe;
	args = m->moreargs;
	g1 = m->curg;


	if(frame == 1 && args > 0 && m->morebuf.sp - sizeof(Stktop) - args - 32 > g1->stackguard) {
		// special case: called from reflect.call (frame == 1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = g1->stackguard - StackGuard;
		free = false;
	} else {
		// allocate new segment.
		if(frame == 1)	// failed reflect.call hint
			frame = 0;
		frame += args;
		if(frame < StackBig)
			frame = StackBig;
		frame += 1024;	// room for more functions, Stktop.
		stk = stackalloc(frame);
		top = (Stktop*)(stk+frame-sizeof(*top));
		free = true;
	}

//printf("newstack frame=%d args=%d morepc=%p morefp=%p gobuf=%p, %p newstk=%p\n", frame, args, m->morepc, m->morefp, g->sched.pc, g->sched.sp, stk);

	// Record how to get back to the old segment.
	top->stackbase = g1->stackbase;
	top->stackguard = g1->stackguard;
	top->gobuf = m->morebuf;
	top->fp = m->morefp;
	top->args = args;
	top->free = free;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	g1->stackbase = (byte*)top;
	g1->stackguard = stk + StackGuard;

	sp = (byte*)top;
	if(args > 0) {
		// Copy the arguments onto the new stack, just below the Stktop.
		sp -= args;
		mcpy(sp, m->morefp, args);
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = sp;
	label.pc = (byte*)·lessstack;
	label.g = m->curg;
	gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}
776
Russ Cox95100342009-04-01 00:26:00 -0700777G*
778malg(int32 stacksize)
779{
780 G *g;
781 byte *stk;
782
783 g = malloc(sizeof(G));
Russ Coxf25586a2010-02-10 00:00:12 -0800784 if(stacksize >= 0) {
785 stk = stackalloc(stacksize + StackGuard);
786 g->stack0 = stk;
787 g->stackguard = stk + StackGuard;
788 g->stackbase = stk + StackGuard + stacksize - sizeof(Stktop);
789 runtime_memclr(g->stackbase, sizeof(Stktop));
790 }
Russ Cox95100342009-04-01 00:26:00 -0700791 return g;
792}
793
794/*
795 * Newproc and deferproc need to be textflag 7
796 * (no possible stack split when nearing overflow)
797 * because they assume that the arguments to fn
798 * are available sequentially beginning at &arg0.
799 * If a stack split happened, only the one word
800 * arg0 would be copied. It's okay if any functions
801 * they call split the stack below the newproc frame.
802 */
// Runtime entry for creating a new goroutine that runs fn with siz
// bytes of arguments.  Must not split the stack (textflag 7) because
// the variadic arguments are read directly from the caller's frame.
#pragma textflag 7
void
·newproc(int32 siz, byte* fn, ...)
{
	// The argument bytes begin immediately after fn on the frame.
	newproc1(fn, (byte*)(&fn+1), siz, 0);
}
809
// Create a new g running fn, with narg bytes of arguments starting
// at argp and nret bytes of result space.  The new g is handed to
// newprocreadylocked; it does not begin executing here.
G*
newproc1(byte *fn, byte *argp, int32 narg, int32 nret)
{
	byte *sp;
	G *newg;
	int32 siz;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	// Frame holds arguments plus results, rounded up to 8 bytes.
	siz = narg + nret;
	siz = (siz+7) & ~7;
	if(siz > 1024)
		throw("runtime.newproc: too many args");

	lock(&sched);

	// Reuse a G from the free list when possible; otherwise allocate
	// a fresh one and link it onto the list of all goroutines.
	if((newg = gfget()) != nil){
		newg->status = Gwaiting;
		if(newg->stackguard - StackGuard != newg->stack0)
			throw("invalid stack in newg");
	} else {
		newg = malg(4096);
		newg->status = Gwaiting;
		newg->alllink = allg;
		allg = newg;
	}

	// Copy the argument bytes to the top of the new stack.
	sp = newg->stackbase;
	sp -= siz;
	mcpy(sp, argp, narg);

	// Arrange for execution to start at fn and to fall into
	// goexit when fn returns.
	newg->sched.sp = sp;
	newg->sched.pc = (byte*)goexit;
	newg->sched.g = newg;
	newg->entry = fn;

	// Assign a fresh goroutine id.  Sched lock is still held.
	sched.gcount++;
	goidgen++;
	newg->goid = goidgen;

	newprocreadylocked(newg);
	unlock(&sched);

	return newg;
//printf(" goid=%d\n", newg->goid);
}
855
// Record a deferred call to fn with siz bytes of arguments.
// Must not split the stack (textflag 7): the argument bytes are
// copied straight off the caller's frame starting at &fn+1.
#pragma textflag 7
uintptr
·deferproc(int32 siz, byte* fn, ...)
{
	Defer *d;

	// Defer ends in a variable-size args array; size the allocation
	// so that d->args can hold siz bytes.
	d = malloc(sizeof(*d) + siz - sizeof(d->args));
	d->fn = fn;
	d->sp = (byte*)(&fn+1);
	d->siz = siz;
	d->pc = ·getcallerpc(&siz);
	// Snapshot the argument bytes now; deferreturn copies them
	// back into the frame when the deferred call actually runs.
	mcpy(d->args, d->sp, d->siz);

	// Push onto the goroutine's defer list (LIFO).
	d->link = g->defer;
	g->defer = d;

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return 0;
}
880
// Run the most recently deferred call if it was registered by the
// frame that is now returning.  Must not split the stack (textflag 7)
// so that &arg0 still identifies the caller's stack pointer.
#pragma textflag 7
void
·deferreturn(uintptr arg0)
{
	Defer *d;
	byte *sp, *fn;

	d = g->defer;
	if(d == nil)
		return;
	// Only fire if the head defer belongs to the returning frame.
	sp = getcallersp(&arg0);
	if(d->sp != sp)
		return;
	// Replay the saved argument bytes into the frame, pop the
	// record, then tail-jump to the deferred function; jmpdefer
	// does not return here.
	mcpy(d->sp, d->args, d->siz);
	g->defer = d->link;
	fn = d->fn;
	free(d);
	jmpdefer(fn, sp);
}
900
901static void
902rundefer(void)
903{
904 Defer *d;
905
906 while((d = g->defer) != nil) {
907 g->defer = d->link;
908 reflect·call(d->fn, d->args, d->siz);
909 free(d);
910 }
911}
912
913// Free stack frames until we hit the last one
914// or until we find the one that contains the sp.
915static void
916unwindstack(G *gp, byte *sp)
917{
918 Stktop *top;
919 byte *stk;
920
921 // Must be called from a different goroutine, usually m->g0.
922 if(g == gp)
923 throw("unwindstack on self");
924
925 while((top = (Stktop*)gp->stackbase) != nil && top->stackbase != nil) {
926 stk = gp->stackguard - StackGuard;
927 if(stk <= sp && sp < gp->stackbase)
928 break;
929 gp->stackbase = top->stackbase;
930 gp->stackguard = top->stackguard;
931 free(stk);
932 }
933}
934
Russ Cox9b1507b2010-03-31 11:46:01 -0700935static void
936printpanics(Panic *p)
937{
938 if(p->link) {
939 printpanics(p->link);
940 printf("\t");
941 }
942 printf("panic: ");
Russ Cox63e878a2010-03-31 15:55:10 -0700943 ·printany(p->arg);
Russ Cox9b1507b2010-03-31 11:46:01 -0700944 if(p->recovered)
945 printf(" [recovered]");
946 printf("\n");
947}
948
// Implementation of the panic builtin: push a Panic record, run the
// goroutine's deferred calls in LIFO order giving each a chance to
// recover, and if none does, print the panic chain and abort.
void
·panic(Eface e)
{
	Defer *d;
	Panic *p;

	// Record the new panic on the goroutine's panic stack.
	p = mal(sizeof *p);
	p->arg = e;
	p->link = g->panic;
	p->stackbase = g->stackbase;
	g->panic = p;

	for(;;) {
		d = g->defer;
		if(d == nil)
			break;
		// take defer off list in case of recursive panic
		g->defer = d->link;
		g->ispanic = true; // rock for newstack, where reflect.call ends up
		reflect·call(d->fn, d->args, d->siz);
		if(p->recovered) {
			// A deferred call invoked recover: drop this panic
			// and hand control to the scheduler in Grecovery
			// state to resume the recovering frame.
			g->panic = p->link;
			free(p);
			// put recovering defer back on list
			// for scheduler to find.
			d->link = g->defer;
			g->defer = d;
			g->status = Grecovery;
			gosched();
			throw("recovery failed"); // gosched should not return
		}
		free(d);
	}

	// ran out of deferred calls - old-school panic now
	fd = 2;	// write the report to file descriptor 2 (standard error)
	printpanics(g->panic);
	panic(0);
}
988
// Implementation of the recover builtin: if called from a deferred
// function running directly in response to a panic, stop the panic
// and return its argument; otherwise return nil.  The match is made
// by checking that the caller's frame sits at the top of the stack
// segment that reflect.call created for the deferred call.
#pragma textflag 7 /* no split, or else g->stackguard is not the stack for fp */
void
·recover(byte *fp, Eface ret)
{
	Stktop *top, *oldtop;
	Panic *p;

	// Normalize fp to the caller's actual stack pointer.
	fp = getcallersp(fp);

	// Must be a panic going on.
	if((p = g->panic) == nil || p->recovered)
		goto nomatch;

	// Frame must be at the top of the stack segment,
	// because each deferred call starts a new stack
	// segment as a side effect of using reflect.call.
	// (There has to be some way to remember the
	// variable argument frame size, and the segment
	// code already takes care of that for us, so we
	// reuse it.)
	//
	// As usual closures complicate things: the fp that
	// the closure implementation function claims to have
	// is where the explicit arguments start, after the
	// implicit pointer arguments and PC slot.
	// If we're on the first new segment for a closure,
	// then fp == top - top->args is correct, but if
	// the closure has its own big argument frame and
	// allocated a second segment (see below),
	// the fp is slightly above top - top->args.
	// That condition can't happen normally though
	// (stack pointer go down, not up), so we can accept
	// any fp between top and top - top->args as
	// indicating the top of the segment.
	top = (Stktop*)g->stackbase;
	if(fp < (byte*)top - top->args || (byte*)top < fp)
		goto nomatch;

	// The deferred call makes a new segment big enough
	// for the argument frame but not necessarily big
	// enough for the function's local frame (size unknown
	// at the time of the call), so the function might have
	// made its own segment immediately. If that's the
	// case, back top up to the older one, the one that
	// reflect.call would have made for the panic.
	//
	// The fp comparison here checks that the argument
	// frame that was copied during the split (the top->args
	// bytes above top->fp) abuts the old top of stack.
	// This is a correct test for both closure and non-closure code.
	oldtop = (Stktop*)top->stackbase;
	if(oldtop != nil && top->fp == (byte*)oldtop - top->args)
		top = oldtop;

	// Now we have the segment that was created to
	// run this call. It must have been marked as a panic segment.
	if(!top->panic)
		goto nomatch;

	// Okay, this is the top frame of a deferred call
	// in response to a panic. It can see the panic argument.
	p->recovered = 1;
	ret = p->arg;
	FLUSH(&ret);
	return;

nomatch:
	// Not directly inside a deferred call run by panic:
	// recover returns nil.
	ret.type = nil;
	ret.data = nil;
	FLUSH(&ret);
}
1060
1061
Russ Cox83727cc2010-03-29 21:48:22 -07001062// Put on gfree list. Sched must be locked.
1063static void
1064gfput(G *g)
1065{
1066 if(g->stackguard - StackGuard != g->stack0)
1067 throw("invalid stack in gfput");
1068 g->schedlink = sched.gfree;
1069 sched.gfree = g;
1070}
1071
1072// Get from gfree list. Sched must be locked.
1073static G*
1074gfget(void)
1075{
1076 G *g;
1077
1078 g = sched.gfree;
1079 if(g)
1080 sched.gfree = g->schedlink;
1081 return g;
1082}
Russ Cox79e1db22008-12-04 08:30:54 -08001083
// Breakpoint executes a hardware breakpoint trap.
void
·Breakpoint(void)
{
	breakpoint();
}
1089
// Goexit terminates the current goroutine, running its pending
// deferred calls first.
void
·Goexit(void)
{
	rundefer();
	goexit();
}
1096
// Gosched yields the processor, allowing other goroutines to run.
void
·Gosched(void)
{
	gosched();
}
1102
// LockOSThread wires the calling goroutine to its current OS thread
// by linking g and m to each other.
void
·LockOSThread(void)
{
	// Wiring is not allowed before the scheduler is fully started.
	if(sched.predawn)
		throw("cannot wire during init");
	m->lockedg = g;
	g->lockedm = m;
}
1111
// delete when scheduler is stronger
// Set the maximum number of cpus (ms) executing user code
// simultaneously.  Values below 1 are clamped to 1.
void
·GOMAXPROCS(int32 n)
{
	if(n < 1)
		n = 1;

	lock(&sched);
	sched.gomaxprocs = n;
	sched.mcpumax = n;
	// handle fewer procs?
	if(sched.mcpu > sched.mcpumax) {
		unlock(&sched);
		// just give up the cpu.
		// we'll only get rescheduled once the
		// number has come down.
		gosched();
		return;
	}
	// handle more procs
	matchmg();
	unlock(&sched);
}
1135
// UnlockOSThread undoes LockOSThread, detaching the calling
// goroutine from its OS thread.
void
·UnlockOSThread(void)
{
	m->lockedg = nil;
	g->lockedm = nil;
}
1142
// for testing of wire, unwire
// Returns the id of the current m via the Go result slot;
// FLUSH forces the store back to the caller's frame.
void
·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}