Source file: src/runtime/lock_futex.go

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
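
// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically, if *addr == val, sleep for at most ns nanoseconds
//		(ns < 0 means sleep forever). May be woken up spuriously.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any threads are sleeping on addr, wake up at most cnt of them.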

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
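
// The lock state in l.key is one of mutex_unlocked, mutex_locked, or
// mutex_sleeping; mutex_sleeping means there is presumably at least one
// thread sleeping on the futex. Spinning threads do not affect the state.

// key32 reinterprets the uintptr mutex.key and note.key as a *uint32,
// as required by the 32-bit futex primitives.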
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

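	// Speculative grab for lock.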
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

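	// wait is either mutex_locked or mutex_sleeping, depending on whether
	// there is a thread sleeping on this mutex. If we ever change l.key
	// from mutex_sleeping to some other value, we must be careful to
	// change it back to mutex_sleeping before returning, to ensure that
	// the sleeping thread gets its wakeup call.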
	wait := v

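	// On uniprocessors there is no point spinning.
	// On multiprocessors, spin for active_spin attempts.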
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
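		// Try for lock, spinning.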
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

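		// Try for lock, rescheduling.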
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

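		// Sleep.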
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
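	// If this was the last lock held and a preemption was requested in the
	// meantime, re-arm the preemption request via the stack guard.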
	if gp.m.locks == 0 && gp.preempt {
		gp.stackguard0 = stackPreempt
	}
}

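// One-time notifications.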
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
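		// Sleep in short intervals so that the cgo yield hook
		// (*cgo_yield) gets called periodically.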
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

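// notetsleep_internal waits for up to ns nanoseconds (ns < 0 means wait
// forever) for the note to be signaled, and reports whether it was.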
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
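			// Sleep in short intervals so that the cgo yield hook
			// gets called periodically.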
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

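// notetsleepg is like notetsleep, but is called on a user g (not g0);
// it wraps the wait in entersyscallblock/exitsyscall.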
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

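// beforeIdle and checkTimeouts are no-ops in this futex-based
// implementation.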
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}