// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
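//
// As a concrete sketch (numbers chosen here for illustration, not taken from
// the code below): with B = 4 there are 1<<4 = 16 buckets, a key whose hash
// is h lives in bucket h&15, and the top 8 bits of h (bumped up to at least
// minTopHash) are cached as that cell's tophash, so most non-matching cells
// can be rejected without calling the key's equality function.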

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
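//
// Note how the probe columns track the load factor: assuming entries are
// spread evenly, a miss must scan roughly loadFactor cells before giving up
// (missprobe ~ loadFactor), while a hit stops about halfway through on
// average (hitprobe ~ 1 + loadFactor/2), which matches the table above.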

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1

	// trigger a garbage collection at every alloc called from this code
	checkgc = false
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count int // # live cells == size of map. Must be first (used by len() builtin)
	flags uint8
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	hash0 uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.overflow.
	// Overflow is used only if key and value do not contain pointers.
	// overflow[0] contains overflow buckets for hmap.buckets.
	// overflow[1] contains overflow buckets for hmap.oldbuckets.
	// The first indirection allows us to reduce static size of hmap.
	// The second indirection allows us to store a pointer to the slice in hiter.
	overflow *[2]*[]*bmap
}

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
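
// A rough sketch of why the keys-then-values layout matters: a bucket of a
// map[int64]int8 is 8 tophash bytes, then 8 int64 keys, then 8 int8 values,
// then the overflow pointer. Interleaving each int8 value with the next
// int64 key would instead require 7 bytes of alignment padding after every
// value, i.e. 56 wasted bytes per bucket.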

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    [2]*[]*bmap    // keeps overflow buckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
}

func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.overflow[0] = append(*h.overflow[0], ovf)
	}
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
}

func (h *hmap) createOverflow() {
	if h.overflow == nil {
		h.overflow = new([2]*[]*bmap)
	}
	if h.overflow[0] == nil {
		h.overflow[0] = new([]*bmap)
	}
}

// makemap implements a Go map creation make(map[k]v, hint)
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}
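	// For example (a sketch of the arithmetic above, not an extra check):
	// with hint = 100 the loop settles on B = 4, since 16 buckets hold up to
	// loadFactor*16 = 104 entries while B = 3 would only cover 52.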

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	buckets := bucket
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// already have a mapping for key. Update it.
			typedmemmove(t.key, k2, key)
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			typedmemmove(t.elem, v2, val)
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key. Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	typedmemmove(t.key, insertk, key)
	typedmemmove(t.elem, insertv, val)
	*inserti = top
	h.count++
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow[0] = nil
	it.overflow[1] = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = *h.overflow
	}

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))
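	// (In other words, the low h.B bits of r choose a random starting bucket
	// and the next bucketCntBits bits choose a random starting cell within
	// each bucket, so iteration order deliberately varies between runs.)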

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another hash_iter_init().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomicor8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs). We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		throw("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	if h.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.overflow[1] != nil {
			throw("overflow is not nil")
		}
		h.overflow[1] = h.overflow[0]
		h.overflow[0] = nil
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !t.reflexivekey && !alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, x, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, y, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
			// Can discard old overflow buckets as well.
			// If they are still referenced by an iterator,
			// then the iterator holds a pointer to the slice.
			if h.overflow != nil {
				h.overflow[1] = nil
			}
		}
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0, nil, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}