// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import "unsafe"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime·semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime·semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime·semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
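// These primitives are provided by the OS-specific parts of the runtime
// for the platforms listed in the build tag above; this file supplies the
// portable lock and note code built on top of them.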
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

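// The state of a mutex is encoded in l.key:
//
//	0                    unlocked, no waiting M's
//	locked (1)           locked, no waiting M's
//	M pointer | locked   locked, with a list of waiting M's chained
//	                     through m.nextwaitm
//
// unlock briefly stores a bare M pointer (low bit clear) while handing
// the rest of the wait list back before waking the dequeued M.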
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if casuintptr(&l.key, 0, locked) {
		return
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomicloaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l.key points to a linked list of M's waiting
			// for this lock, chained through m.nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = (*m)((unsafe.Pointer)(v &^ locked))
				if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomicloaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

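// Note that unlock does not hand the mutex directly to a waiting M:
// the dequeued M is woken and must then recompete for the lock in
// lock's outer loop, so another locker may slip in first.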
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomicloaduintptr(&l.key)
		if v == locked {
			if casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)((unsafe.Pointer)(v &^ locked))
			if casuintptr(&l.key, v, uintptr(unsafe.Pointer(mp.nextwaitm))) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

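// A note's key field encodes its state:
//
//	0           cleared; no wakeup sent and no M sleeping
//	locked (1)  a notewakeup has been delivered
//	M pointer   that M is sleeping on the note, blocked in semasleep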
func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomicloaduintptr(&n.key)
		if casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set key to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	semasleep(-1)
	gp.m.blocked = false
}

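// notetsleep_internal reports whether the note was signaled: it returns
// true if a wakeup arrived (or ns < 0 and we slept until one did) and
// false if the deadline passed first.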
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n.key.
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		semasleep(-1)
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomicloaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
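// entersyscallblock releases this M's P before blocking, so other
// goroutines can keep running while we wait on the note.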
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}