blob: f9d7846d7e47890499ebf837035f92bc88fbd99e [file] [log] [blame]
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)
10
// mapaccess1_fast32 is the lookup fast path for maps with 4-byte keys
// (e.g. map[uint32]V, map[int32]V), used by one-result map reads m[k].
// It returns a pointer to the value slot for key, or a pointer to the
// shared zero object if the key is absent; it never returns nil.
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		// Report the read to the race detector, attributed to our caller.
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		// Nil or empty map: every lookup misses; return the zero object.
		return atomicloadp(unsafe.Pointer(&zeroptr))
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1 // bucket index mask: 2^B buckets
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is in the middle of growing. The old table has half
			// as many buckets (mask m>>1); if our old bucket has not
			// been evacuated yet, the data still lives there.
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	// Walk the bucket and its overflow chain. Keys are stored inline,
	// bucketCnt of them, starting at dataOffset, 4 bytes each; the
	// values follow the full key array.
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// Compare the key bytes first (cheap for a 4-byte key); the
			// tophash byte is consulted afterwards only to reject a
			// stale match in an unused slot.
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				// Slot is unused; the matching key bytes were leftovers.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			// Exhausted the overflow chain: key not present.
			return atomicloadp(unsafe.Pointer(&zeroptr))
		}
	}
}
52
// mapaccess2_fast32 is the lookup fast path for maps with 4-byte keys,
// used by two-result map reads v, ok := m[k]. It returns a pointer to
// the value slot and true if the key is present, or a pointer to the
// shared zero object and false if it is not. The pointer is never nil.
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		// Report the read to the race detector, attributed to our caller.
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		// Nil or empty map: every lookup misses.
		return atomicloadp(unsafe.Pointer(&zeroptr)), false
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1 // bucket index mask: 2^B buckets
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; if our bucket in the old (half-sized)
			// table has not been evacuated, read from it instead.
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	// Walk the bucket and its overflow chain; see mapaccess1_fast32 for
	// the in-bucket layout (keys at dataOffset, values after them).
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				// Slot is unused; the matching key bytes were leftovers.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			// Exhausted the overflow chain: key not present.
			return atomicloadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}
94
// mapaccess1_fast64 is the lookup fast path for maps with 8-byte keys
// (e.g. map[uint64]V, map[int64]V), used by one-result map reads m[k].
// It returns a pointer to the value slot for key, or a pointer to the
// shared zero object if the key is absent; it never returns nil.
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		// Report the read to the race detector, attributed to our caller.
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		// Nil or empty map: every lookup misses; return the zero object.
		return atomicloadp(unsafe.Pointer(&zeroptr))
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1 // bucket index mask: 2^B buckets
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; if our bucket in the old (half-sized)
			// table has not been evacuated, read from it instead.
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	// Walk the bucket and its overflow chain. Keys are stored inline,
	// bucketCnt of them, starting at dataOffset, 8 bytes each; the
	// values follow the full key array.
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// Compare the key bytes first; the tophash byte is checked
			// afterwards only to reject a stale match in an unused slot.
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				// Slot is unused; the matching key bytes were leftovers.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			// Exhausted the overflow chain: key not present.
			return atomicloadp(unsafe.Pointer(&zeroptr))
		}
	}
}
136
// mapaccess2_fast64 is the lookup fast path for maps with 8-byte keys,
// used by two-result map reads v, ok := m[k]. It returns a pointer to
// the value slot and true if the key is present, or a pointer to the
// shared zero object and false if it is not. The pointer is never nil.
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		// Report the read to the race detector, attributed to our caller.
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		// Nil or empty map: every lookup misses.
		return atomicloadp(unsafe.Pointer(&zeroptr)), false
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1 // bucket index mask: 2^B buckets
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; if our bucket in the old (half-sized)
			// table has not been evacuated, read from it instead.
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	// Walk the bucket and its overflow chain; see mapaccess1_fast64 for
	// the in-bucket layout (keys at dataOffset, values after them).
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				// Slot is unused; the matching key bytes were leftovers.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			// Exhausted the overflow chain: key not present.
			return atomicloadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}
178
// mapaccess1_faststr is the lookup fast path for maps with string keys,
// used by one-result map reads m[k]. It returns a pointer to the value
// slot for ky, or a pointer to the shared zero object if the key is
// absent; it never returns nil.
//
// Three strategies are used:
//  1. single-bucket map, short key (<32 bytes): compare every key in full;
//  2. single-bucket map, long key: cheap filters (length, data pointer,
//     first/last 4 bytes) to avoid full comparisons; falls through to the
//     hashed path if more than one candidate survives;
//  3. multi-bucket map (or fallthrough): hash, then the standard
//     tophash-guided bucket walk.
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		// Report the read to the race detector, attributed to our caller.
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		// Nil or empty map: every lookup misses; return the zero object.
		return atomicloadp(unsafe.Pointer(&zeroptr))
	}
	// View the string as its (ptr, len) header. Each in-bucket key is a
	// stringStruct occupying 2*ptrSize bytes starting at dataOffset.
	key := (*stringStruct)(unsafe.Pointer(&ky))
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				// Equal data pointers mean equal strings; otherwise
				// compare the bytes (lengths already match).
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
				}
			}
			return atomicloadp(unsafe.Pointer(&zeroptr))
		}
		// long key, try not to do more comparisons than necessary
		// keymaybe == bucketCnt means "no candidate found yet".
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				// Same data pointer (and same length): definite match.
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes (safe: this path requires len >= 32)
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			// Exactly one candidate survived the filters: confirm with a
			// full byte comparison.
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return atomicloadp(unsafe.Pointer(&zeroptr))
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1 // bucket index mask: 2^B buckets
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing; if our bucket in the old (half-sized) table
		// has not been evacuated, read from it instead.
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Take the top byte of the hash; values below minTopHash are
	// reserved as slot-state markers, so shift real tophashes up.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			// Exhausted the overflow chain: key not present.
			return atomicloadp(unsafe.Pointer(&zeroptr))
		}
	}
}
280
// mapaccess2_faststr is the lookup fast path for maps with string keys,
// used by two-result map reads v, ok := m[k]. It returns a pointer to
// the value slot and true if the key is present, or a pointer to the
// shared zero object and false if it is not. The pointer is never nil.
// The lookup strategies mirror mapaccess1_faststr: full comparisons for
// short keys in a one-bucket map, cheap prefix/suffix filters for long
// keys, and a tophash-guided bucket walk otherwise.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		// Report the read to the race detector, attributed to our caller.
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		// Nil or empty map: every lookup misses.
		return atomicloadp(unsafe.Pointer(&zeroptr)), false
	}
	// View the string as its (ptr, len) header. Each in-bucket key is a
	// stringStruct occupying 2*ptrSize bytes starting at dataOffset.
	key := (*stringStruct)(unsafe.Pointer(&ky))
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				// Equal data pointers mean equal strings; otherwise
				// compare the bytes (lengths already match).
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
				}
			}
			return atomicloadp(unsafe.Pointer(&zeroptr)), false
		}
		// long key, try not to do more comparisons than necessary
		// keymaybe == bucketCnt means "no candidate found yet".
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				// Same data pointer (and same length): definite match.
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes (safe: this path requires len >= 32)
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			// Exactly one candidate survived the filters: confirm with a
			// full byte comparison.
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return atomicloadp(unsafe.Pointer(&zeroptr)), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1 // bucket index mask: 2^B buckets
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing; if our bucket in the old (half-sized) table
		// has not been evacuated, read from it instead.
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Take the top byte of the hash; values below minTopHash are
	// reserved as slot-state markers, so shift real tophashes up.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			// Exhausted the overflow chain: key not present.
			return atomicloadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}