// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build dragonfly freebsd linux
6
7package runtime
8
9import "unsafe"
10
11// This implementation depends on OS-specific implementations of
12//
13// runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
14// Atomically,
15// if(*addr == val) sleep
16// Might be woken up spuriously; that's allowed.
17// Don't sleep longer than ns; ns < 0 means forever.
18//
19// runtime·futexwakeup(uint32 *addr, uint32 cnt)
20// If any procs are sleeping on addr, wake up at most cnt.
21
const (
	// States stored in mutex.key (as a uint32; see key32).
	mutex_unlocked = 0 // lock is free
	mutex_locked = 1 // lock is held; no thread is known to be sleeping
	mutex_sleeping = 2 // lock is held and at least one thread is presumably sleeping on the futex

	// Spin-wait tuning used by lock before falling back to futexsleep.
	active_spin = 4 // number of active-spin rounds (multiprocessor only)
	active_spin_cnt = 30 // iterations passed to procyield per active-spin round
	passive_spin = 1 // number of osyield rounds before sleeping
)
31
32// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
33// mutex_sleeping means that there is presumably at least one sleeping thread.
34// Note that there can be spinning threads during all states - they do not
35// affect mutex's state.
36
// We use the uintptr mutex.key and note.key as a uint32.
// key32 reinterprets a uintptr-backed key (mutex.key or note.key)
// as a *uint32 so it can be handed to the 32-bit futex primitives.
func key32(p *uintptr) *uint32 {
	raw := unsafe.Pointer(p)
	return (*uint32)(raw)
}
41
// lock acquires l, spinning briefly before sleeping on the futex.
// It increments the per-m lock count (gp.m.locks) as a sanity check;
// unlock decrements it.
func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		// Note: the CAS installs `wait`, not mutex_locked, so a
		// pending sleeper state is preserved (see comment above).
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep. The xchg both retries the acquisition and marks the
		// mutex as having a sleeper; if it was unlocked we own it now
		// (and may leave the stale mutex_sleeping value behind, which
		// at worst causes a spurious futexwakeup later).
		v = xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
101
// unlock releases l, waking one sleeping thread if the old state
// recorded a sleeper. It also decrements gp.m.locks (paired with the
// increment in lock) and re-arms the preemption request if this was
// the outermost lock.
func unlock(l *mutex) {
	// Atomically release; the old value tells us whether anyone
	// might be sleeping on the futex.
	v := xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
120
121// One-time notifications.
122func noteclear(n *note) {
123 n.key = 0
124}
125
126func notewakeup(n *note) {
127 old := xchg(key32(&n.key), 1)
128 if old != 0 {
129 print("notewakeup - double wakeup (", old, ")\n")
Keith Randallb2a950b2014-12-27 20:58:00 -0800130 throw("notewakeup - double wakeup")
Russ Cox3a7f6642014-08-29 16:20:48 -0400131 }
132 futexwakeup(key32(&n.key), 1)
133}
134
135func notesleep(n *note) {
136 gp := getg()
137 if gp != gp.m.g0 {
Keith Randallb2a950b2014-12-27 20:58:00 -0800138 throw("notesleep not on g0")
Russ Cox3a7f6642014-08-29 16:20:48 -0400139 }
140 for atomicload(key32(&n.key)) == 0 {
141 gp.m.blocked = true
142 futexsleep(key32(&n.key), 0, -1)
143 gp.m.blocked = false
144 }
145}
146
//go:nosplit
// notetsleep_internal waits for a wakeup on n for at most ns
// nanoseconds; ns < 0 means wait forever. It reports whether a
// wakeup was observed (false means the timeout expired first).
// futexsleep may return spuriously, so the key is rechecked and
// the remaining time recomputed on every iteration.
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	// Infinite wait: loop until the key is set.
	if ns < 0 {
		for atomicload(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, -1)
			gp.m.blocked = false
		}
		return true
	}

	// Fast path: already woken.
	if atomicload(key32(&n.key)) != 0 {
		return true
	}

	// Timed wait against an absolute deadline.
	deadline := nanotime() + ns
	for {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		gp.m.blocked = false
		if atomicload(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		// Spurious or early return: sleep again for the remainder.
		ns = deadline - now
	}
	// Final check distinguishes wakeup from timeout.
	return atomicload(key32(&n.key)) != 0
}
180
181func notetsleep(n *note, ns int64) bool {
182 gp := getg()
Austin Clements28b51182015-01-30 15:30:41 -0500183 if gp != gp.m.g0 && gp.m.preemptoff != "" {
Keith Randallb2a950b2014-12-27 20:58:00 -0800184 throw("notetsleep not on g0")
Russ Cox3a7f6642014-08-29 16:20:48 -0400185 }
186
187 return notetsleep_internal(n, ns)
188}
189
190// same as runtime·notetsleep, but called on user g (not g0)
191// calls only nosplit functions between entersyscallblock/exitsyscall
192func notetsleepg(n *note, ns int64) bool {
193 gp := getg()
194 if gp == gp.m.g0 {
Keith Randallb2a950b2014-12-27 20:58:00 -0800195 throw("notetsleepg on g0")
Russ Cox3a7f6642014-08-29 16:20:48 -0400196 }
197
Russ Coxb2cdf302014-11-11 17:08:33 -0500198 entersyscallblock(0)
Russ Cox3a7f6642014-08-29 16:20:48 -0400199 ok := notetsleep_internal(n, ns)
Russ Coxb2cdf302014-11-11 17:08:33 -0500200 exitsyscall(0)
Russ Cox3a7f6642014-08-29 16:20:48 -0400201 return ok
202}