// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "malloc.h"

typedef struct Sched Sched;

M	m0;
G	g0;	// idle goroutine for m0

static	int32	debug	= 0;
static	Lock	debuglock;

// Go scheduler
//
// The go scheduler's job is to match ready-to-run goroutines (`g's)
// with waiting-for-work schedulers (`m's). If there are ready gs
// and no waiting ms, ready() will start a new m running in a new
// OS thread, so that all ready gs can run simultaneously, up to a limit.
// For now, ms never go away.
//
// The default maximum number of ms is one: go runs single-threaded.
// This is because some locking details have to be worked out
// (select in particular is not locked properly) and because the low-level
// code hasn't been written yet for OS X. Setting the environment
// variable $GOMAXPROCS changes sched.mcpumax for now.
//
// Even a program that can run without deadlock in a single process
// might use more ms if given the chance. For example, the prime
// sieve will use as many ms as there are primes (up to sched.mcpumax),
// allowing different stages of the pipeline to execute in parallel.
// We could revisit this choice, only kicking off new ms for blocking
// system calls, but that would limit the amount of parallel computation
// that go would try to do.
//
// In general, one could imagine all sorts of refinements to the
// scheduler, but the goal now is just to get something working on
// Linux and OS X.
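//
// For example (a hypothetical invocation, assuming an amd64 binary
// named 6.out):
//
//	$ GOMAXPROCS=4 ./6.out
//
// lets up to four ms run goroutines on cpus at once. schedinit below
// reads the environment variable; runtime·GOMAXPROCS changes the
// limit at run time.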

struct Sched {
	Lock;

	G *gfree;	// available gs (status == Gdead)

	G *ghead;	// gs waiting to run
	G *gtail;
	int32 gwait;	// number of gs waiting to run
	int32 gcount;	// number of gs that are alive

	M *mhead;	// ms waiting for work
	int32 mwait;	// number of ms waiting for work
	int32 mcount;	// number of ms that have been created
	int32 mcpu;	// number of ms executing on cpu
	int32 mcpumax;	// max number of ms allowed on cpu
	int32 gomaxprocs;
	int32 msyscall;	// number of ms in system calls

	int32 predawn;	// running initialization, don't run new gs.

	Note	stopped;	// one g can wait here for ms to stop
	int32 waitstop;	// after setting this flag
};

Sched sched;

// Scheduling helpers. Sched must be locked.
static void gput(G*);	// put/get on ghead/gtail
static G* gget(void);
static void mput(M*);	// put/get on mhead
static M* mget(G*);
static void gfput(G*);	// put/get on gfree
static G* gfget(void);
static void matchmg(void);	// match ms to gs
static void readylocked(G*);	// ready, but sched is locked
static void mnextg(M*, G*);

// Scheduler loop.
static void scheduler(void);

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call mstart
//
// The new G does:
//
//	call main·init_function
//	call initdone
//	call main·main
void
schedinit(void)
{
	int32 n;
	byte *p;

	mallocinit();
	goargs();

	// Allocate internal symbol table representation now,
	// so that we don't need to call malloc when we crash.
	findfunc(0);

	sched.gomaxprocs = 1;
	p = getenv("GOMAXPROCS");
	if(p != nil && (n = atoi(p)) != 0)
		sched.gomaxprocs = n;
	sched.mcpumax = sched.gomaxprocs;
	sched.mcount = 1;
	sched.predawn = 1;
}

// Called after main·init_function; main·main will be called on return.
void
initdone(void)
{
	// Let's go.
	sched.predawn = 0;
	mstats.enablegc = 1;

	// If main·init_function started other goroutines,
	// kick off new ms to handle them, like ready
	// would have, had it not been pre-dawn.
	lock(&sched);
	matchmg();
	unlock(&sched);
}

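// Finish running the current goroutine: mark it moribund and enter
// the scheduler, which recycles it (see the Gmoribund case in scheduler).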
void
goexit(void)
{
	g->status = Gmoribund;
	gosched();
}

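// Print stack traces of all live goroutines other than me.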
void
tracebackothers(G *me)
{
	G *g;

	for(g = allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		printf("\ngoroutine %d:\n", g->goid);
		traceback(g->sched.pc, g->sched.sp, g);
	}
}

// Put on `g' queue. Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil) {
		mnextg(m, g);
		return;
	}

	g->schedlink = nil;
	if(sched.ghead == nil)
		sched.ghead = g;
	else
		sched.gtail->schedlink = g;
	sched.gtail = g;
	sched.gwait++;
}

// Get from `g' queue. Sched must be locked.
static G*
gget(void)
{
	G *g;

	g = sched.ghead;
	if(g){
		sched.ghead = g->schedlink;
		if(sched.ghead == nil)
			sched.gtail = nil;
		sched.gwait--;
	}
	return g;
}

// Put on `m' list. Sched must be locked.
static void
mput(M *m)
{
	m->schedlink = sched.mhead;
	sched.mhead = m;
	sched.mwait++;
}

// Get an `m' to run `g'. Sched must be locked.
static M*
mget(G *g)
{
	M *m;

	// if g has its own m, use it.
	if((m = g->lockedm) != nil)
		return m;

	// otherwise use general m pool.
	if((m = sched.mhead) != nil){
		sched.mhead = m->schedlink;
		sched.mwait--;
	}
	return m;
}

// Put on gfree list. Sched must be locked.
static void
gfput(G *g)
{
	g->schedlink = sched.gfree;
	sched.gfree = g;
}

// Get from gfree list. Sched must be locked.
static G*
gfget(void)
{
	G *g;

	g = sched.gfree;
	if(g)
		sched.gfree = g->schedlink;
	return g;
}

// Mark g ready to run.
void
ready(G *g)
{
	lock(&sched);
	readylocked(g);
	unlock(&sched);
}

// Mark g ready to run. Sched is already locked.
// G might be running already and about to stop.
// The sched lock protects g->status from changing underfoot.
static void
readylocked(G *g)
{
	if(g->m){
		// Running on another machine.
		// Ready it when it stops.
		g->readyonstop = 1;
		return;
	}

	// Mark runnable.
	if(g->status == Grunnable || g->status == Grunning)
		throw("bad g->status in ready");
	g->status = Grunnable;

	gput(g);
	if(!sched.predawn)
		matchmg();
}

// Same as readylocked but a different symbol so that
// debuggers can set a breakpoint here and catch all
// new goroutines.
static void
newprocreadylocked(G *g)
{
	readylocked(g);
}

// Pass g to m for running.
static void
mnextg(M *m, G *g)
{
	sched.mcpu++;
	m->nextg = g;
	if(m->waitnextg) {
		m->waitnextg = 0;
		notewakeup(&m->havenextg);
	}
}

// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS gs are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;

	if(sched.mcpu < 0)
		throw("negative sched.mcpu");

	// If there is a g waiting as m->nextg,
	// mnextg took care of the sched.mcpu++.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		unlock(&sched);
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(sched.gwait != 0)
			matchmg();
	} else {
		// Look for work on global queue.
		while(sched.mcpu < sched.mcpumax && (gp=gget()) != nil) {
			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			sched.mcpu++;	// this m will run gp
			unlock(&sched);
			return gp;
		}
		// Otherwise, wait on global m queue.
		mput(m);
	}
	if(sched.mcpu == 0 && sched.msyscall == 0)
		throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	noteclear(&m->havenextg);
	if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
		sched.waitstop = 0;
		notewakeup(&sched.stopped);
	}
	unlock(&sched);

	notesleep(&m->havenextg);
	if((gp = m->nextg) == nil)
		throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}

// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
stoptheworld(void)
{
	lock(&sched);
	sched.mcpumax = 1;
	while(sched.mcpu > 1) {
		noteclear(&sched.stopped);
		sched.waitstop = 1;
		unlock(&sched);
		notesleep(&sched.stopped);
		lock(&sched);
	}
	unlock(&sched);
}

// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
starttheworld(void)
{
	lock(&sched);
	sched.mcpumax = sched.gomaxprocs;
	matchmg();
	unlock(&sched);
}

// Called to start an M.
void
mstart(void)
{
	if(m->mcache == nil)
		m->mcache = allocmcache();
	minit();
	scheduler();
}

// Kick off new ms as needed (up to mcpumax).
// There are already `other' other cpus that will
// start looking for goroutines shortly.
// Sched is locked.
static void
matchmg(void)
{
	M *m;
	G *g;

	while(sched.mcpu < sched.mcpumax && (g = gget()) != nil){
		// Find the m that will run g.
		if((m = mget(g)) == nil){
			m = malloc(sizeof(M));
			m->g0 = malg(8192);
			m->id = sched.mcount++;
			newosproc(m, m->g0, m->g0->stackbase, mstart);
		}
		mnextg(m, g);
	}
}

// Scheduler loop: find g to run, run it, repeat.
static void
scheduler(void)
{
	G* gp;

	lock(&sched);
	if(gosave(&m->sched) != 0){
		// Jumped here via gosave/gogo, so didn't
		// execute lock(&sched) above.
		lock(&sched);

		if(sched.predawn)
			throw("init sleeping");

		// Just finished running m->curg.
		gp = m->curg;
		gp->m = nil;
		sched.mcpu--;

		if(sched.mcpu < 0)
			throw("sched.mcpu < 0 in scheduler");
		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gfput(gp);
			if(--sched.gcount == 0)
				exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	}

	// Find (or wait for) g to run. Unlocks sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;
	if(gp->sched.pc == (byte*)goexit)	// kickoff
		gogocall(&gp->sched, (void(*)(void))gp->entry);
	gogo(&gp->sched, 1);
}

// Enter scheduler. If g->status is Grunning,
// re-queues g and runs everyone else who is waiting
// before running g again. If g->status is Gmoribund,
// kills off g.
void
gosched(void)
{
	if(g == m->g0)
		throw("gosched of g0");
	if(gosave(&g->sched) == 0)
		gogo(&m->sched, 1);
}

// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
// The "arguments" are syscall.Syscall's stack frame.
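//
// A rough sketch of the caller side (the real wrappers live in the
// syscall package, not here): the wrapper calls sys·entersyscall,
// performs the raw system call, then calls sys·exitsyscall, so that
// other goroutines can use the cpu while this one blocks in the kernel.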
void
sys·entersyscall(uint64 callerpc, int64 trap)
{
	USED(callerpc, trap);

	lock(&sched);
	if(sched.predawn) {
		unlock(&sched);
		return;
	}
	g->status = Gsyscall;
	// Leave SP around for gc and traceback.
	// Do before notewakeup so that gc
	// never sees Gsyscall with wrong stack.
	gosave(&g->sched);
	sched.mcpu--;
	sched.msyscall++;
	if(sched.gwait != 0)
		matchmg();
	if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
		sched.waitstop = 0;
		notewakeup(&sched.stopped);
	}
	unlock(&sched);
}

// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
sys·exitsyscall(void)
{
	lock(&sched);
	if(sched.predawn) {
		unlock(&sched);
		return;
	}
	g->status = Grunning;
	sched.msyscall--;
	sched.mcpu++;
	// Fast path - if there's room for this m, we're done.
	if(sched.mcpu <= sched.mcpumax) {
		unlock(&sched);
		return;
	}
	unlock(&sched);

	// Slow path - all the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the sched.mcpu++ above.
	gosched();
}

/*
 * stack layout parameters.
 * known to linkers.
 *
 * g->stackguard is set to point StackGuard bytes
 * above the bottom of the stack. each function
 * compares its stack pointer against g->stackguard
 * to check for overflow. to cut one instruction from
 * the check sequence for functions with tiny frames,
 * the stack is allowed to protrude StackSmall bytes
 * below the stack guard. functions with large frames
 * don't bother with the check and always call morestack.
 * the sequences are:
 *
 *	guard = g->stackguard
 *	frame = function's stack frame size
 *	argsize = size of function arguments (call + return)
 *
 *	stack frame size <= StackSmall:
 *		CMPQ guard, SP
 *		JHI 3(PC)
 *		MOVQ m->morearg, $(argsize << 32)
 *		CALL sys.morestack(SB)
 *
 *	stack frame size > StackSmall but < StackBig
 *		LEAQ (frame-StackSmall)(SP), R0
 *		CMPQ guard, R0
 *		JHI 3(PC)
 *		MOVQ m->morearg, $(argsize << 32)
 *		CALL sys.morestack(SB)
 *
 *	stack frame size >= StackBig:
 *		MOVQ m->morearg, $((argsize << 32) | frame)
 *		CALL sys.morestack(SB)
 *
 * the bottom StackGuard - StackSmall bytes are important:
 * there has to be enough room to execute functions that
 * refuse to check for stack overflow, either because they
 * need to be adjacent to the actual caller's frame (sys.deferproc)
 * or because they handle the imminent stack overflow (sys.morestack).
 *
 * for example, sys.deferproc might call malloc,
 * which does one of the above checks (without allocating a full frame),
 * which might trigger a call to sys.morestack.
 * this sequence needs to fit in the bottom section of the stack.
 * on amd64, sys.morestack's frame is 40 bytes, and
 * sys.deferproc's frame is 56 bytes. that fits well within
 * the StackGuard - StackSmall = 128 bytes at the bottom.
 * there may be other sequences lurking or yet to be written
 * that require more stack. sys.morestack checks to make sure
 * the stack has not completely overflowed and should
 * catch such sequences.
 */
enum
{
	// byte offset of stack guard (g->stackguard) above bottom of stack.
	StackGuard = 256,

	// checked frames are allowed to protrude below the guard by
	// this many bytes. this saves an instruction in the checking
	// sequence when the stack frame is tiny.
	StackSmall = 128,

	// extra space in the frame (beyond the function for which
	// the frame is allocated) is assumed not to be much bigger
	// than this amount. it may not be used efficiently if it is.
	StackBig = 4096,
};

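// Called (via sys·lessstack) when a goroutine returns off the top of
// an extra stack segment: copy any return arguments back into the
// caller's frame (top->fp), free the segment, restore the old stack
// bounds, and resume at the Gobuf saved in the segment's Stktop.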
void
oldstack(void)
{
	Stktop *top, old;
	uint32 args;
	byte *sp;
	G *g1;

//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;
	args = old.args;
	if(args > 0) {
		sp -= args;
		mcpy(top->fp, sp, args);
	}

	stackfree((byte*)g1->stackguard - StackGuard);
	g1->stackbase = old.stackbase;
	g1->stackguard = old.stackguard;

	gogo(&old.gobuf, m->cret);
}

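// Called from sys·morestack when a function needs a bigger stack:
// allocate a new segment, record the old stack bounds and the caller's
// Gobuf in the segment's Stktop, copy the arguments across, and
// restart the function (m->morepc) on the new segment.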
void
newstack(void)
{
	int32 frame, args;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;

	frame = m->moreframe;
	args = m->moreargs;

	// Round up to align things nicely.
	// This is sufficient for both 32- and 64-bit machines.
	args = (args+7) & ~7;

	if(frame < StackBig)
		frame = StackBig;
	frame += 1024;	// for more functions, Stktop.
	stk = stackalloc(frame);

//printf("newstack frame=%d args=%d morepc=%p morefp=%p gobuf=%p, %p newstk=%p\n", frame, args, m->morepc, m->morefp, g->sched.pc, g->sched.sp, stk);

	g1 = m->curg;
	top = (Stktop*)(stk+frame-sizeof(*top));
	top->stackbase = g1->stackbase;
	top->stackguard = g1->stackguard;
	top->gobuf = m->morebuf;
	top->fp = m->morefp;
	top->args = args;

	g1->stackbase = (byte*)top;
	g1->stackguard = stk + StackGuard;

	sp = (byte*)top;
	if(args > 0) {
		sp -= args;
		mcpy(sp, m->morefp, args);
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = sp;
	label.pc = (byte*)sys·lessstack;
	label.g = m->curg;
	gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}

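// Allocate a new G with a stack of stacksize bytes
// (plus the StackGuard region below g->stackguard).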
G*
malg(int32 stacksize)
{
	G *g;
	byte *stk;

	g = malloc(sizeof(G));
	stk = stackalloc(stacksize + StackGuard);
	g->stack0 = stk;
	g->stackguard = stk + StackGuard;
	g->stackbase = stk + StackGuard + stacksize;
	return g;
}

/*
 * Newproc and deferproc need to be textflag 7
 * (no possible stack split when nearing overflow)
 * because they assume that the arguments to fn
 * are available sequentially beginning at &arg0.
 * If a stack split happened, only the one word
 * arg0 would be copied. It's okay if any functions
 * they call split the stack below the newproc frame.
 */
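// Create a new goroutine to run fn with siz bytes of arguments,
// copied from the caller's frame starting at &arg0, and queue it
// as ready to run.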
#pragma textflag 7
void
sys·newproc(int32 siz, byte* fn, byte* arg0)
{
	byte *stk, *sp;
	G *newg;

//printf("newproc siz=%d fn=%p", siz, fn);

	siz = (siz+7) & ~7;
	if(siz > 1024)
		throw("sys·newproc: too many args");

	lock(&sched);

	if((newg = gfget()) != nil){
		newg->status = Gwaiting;
	} else {
		newg = malg(4096);
		newg->status = Gwaiting;
		newg->alllink = allg;
		allg = newg;
	}
	stk = newg->stack0;

	newg->stackguard = stk+StackGuard;

	sp = stk + 4096 - 4*8;
	newg->stackbase = sp;

	sp -= siz;
	mcpy(sp, (byte*)&arg0, siz);

	newg->sched.sp = sp;
	newg->sched.pc = (byte*)goexit;
	newg->sched.g = newg;
	newg->entry = fn;

	sched.gcount++;
	goidgen++;
	newg->goid = goidgen;

	newprocreadylocked(newg);
	unlock(&sched);

//printf(" goid=%d\n", newg->goid);
}

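// Record a deferred call: save fn, the caller's argument pointer, and
// a copy of the siz bytes of arguments on the front of g->defer.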
#pragma textflag 7
void
sys·deferproc(int32 siz, byte* fn, byte* arg0)
{
	Defer *d;

	d = malloc(sizeof(*d) + siz - sizeof(d->args));
	d->fn = fn;
	d->sp = (byte*)&arg0;
	d->siz = siz;
	mcpy(d->args, d->sp, d->siz);

	d->link = g->defer;
	g->defer = d;
}

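// Run the most recently deferred call if it belongs to the caller's
// frame (d->sp matches the caller's argument pointer): copy its saved
// arguments back and jump to it with jmpdefer, which is arranged so
// that deferreturn effectively runs again when the deferred function
// returns, draining the frame's defers in LIFO order.
//
// A rough sketch of how the compiler uses this pair (not generated in
// this file): for Go source like
//
//	defer f(x)
//
// it emits a call to sys·deferproc(siz, f, x) at the defer statement
// and a call to sys·deferreturn immediately before each return from
// the enclosing function.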
#pragma textflag 7
void
sys·deferreturn(uintptr arg0)
{
	Defer *d;
	byte *sp, *fn;

	d = g->defer;
	if(d == nil)
		return;
	sp = (byte*)&arg0;
	if(d->sp != sp)
		return;
	mcpy(d->sp, d->args, d->siz);
	g->defer = d->link;
	fn = d->fn;
	free(d);
	jmpdefer(fn, sp);
}

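// Go-visible entry points: runtime.Breakpoint, runtime.Goexit, and
// runtime.Gosched are thin wrappers around the routines above.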
void
runtime·Breakpoint(void)
{
	breakpoint();
}

void
runtime·Goexit(void)
{
	goexit();
}

void
runtime·Gosched(void)
{
	gosched();
}

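// Wire the calling goroutine to its current m (OS thread): from now
// on the scheduler runs this g only on this m (see gput, mget, and
// nextgandunlock).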
void
runtime·LockOSThread(void)
{
	if(sched.predawn)
		throw("cannot wire during init");
	m->lockedg = g;
	g->lockedm = m;
}

// delete when scheduler is stronger
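// Set the maximum number of ms running goroutines on cpus to n,
// waiting for extra ms to stop and kicking off new ones as needed.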
void
runtime·GOMAXPROCS(int32 n)
{
	if(n < 1)
		n = 1;

	lock(&sched);
	sched.gomaxprocs = n;
	sched.mcpumax = n;
	// handle fewer procs
	while(sched.mcpu > sched.mcpumax) {
		noteclear(&sched.stopped);
		sched.waitstop = 1;
		unlock(&sched);
		notesleep(&sched.stopped);
		lock(&sched);
	}
	// handle more procs
	matchmg();
	unlock(&sched);
}

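// Undo runtime·LockOSThread: detach the calling goroutine from its m.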
void
runtime·UnlockOSThread(void)
{
	m->lockedg = nil;
	g->lockedm = nil;
}

// for testing of wire, unwire
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}