// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators continue iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
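//
// As an illustrative sketch (values assumed, not from a real run): on a
// 64-bit system with B == 5 (32 buckets), a hash is consumed as
//
//	hash := uintptr(0xdeadbeefcafef00d)
//	bucket := hash & (1<<5 - 1)    // low-order bits select the bucket
//	top := uint8(hash >> (64 - 8)) // high-order byte is the in-bucket "tophash"
//
// so a lookup only compares full keys in the (usually rare) slots whose
// tophash byte matches.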

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
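//
// As a hedged worked reading of the 6.50 row above: a maximally loaded
// table with B == 10 holds about
//
//	6.5 * (1 << 10) // = 6656 entries across 1024 eight-slot buckets
//
// with roughly 21% of buckets carrying at least one overflow bucket and
// about 10.8 bytes of overhead per entry.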

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.
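
	// The tophash of a normal entry is derived from the top byte of the
	// key's hash and then bumped past the reserved marks above, exactly
	// as the lookup and insert paths below do:
	//
	//	top := uint8(hash >> (ptrSize*8 - 8))
	//	if top < minTopHash {
	//		top += minTopHash
	//	}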

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1

	// if true, trigger a garbage collection at every allocation made by this code
	checkgc = false
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go.  Don't change this structure without also changing that code!
	count int // # live cells == size of map.  Must be first (used by len() builtin)
	flags uint8
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	hash0 uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.overflow.
	// Overflow is used only if key and value do not contain pointers.
	// overflow[0] contains overflow buckets for hmap.buckets.
	// overflow[1] contains overflow buckets for hmap.oldbuckets.
	// The first indirection allows us to reduce the static size of hmap.
	// The second indirection allows us to store a pointer to the slice in hiter.
	overflow *[2]*[]*bmap
}

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
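
// An illustrative sketch (offsets assumed for a 64-bit system) of the
// implied layout of a map[int64]int8 bucket:
//
//	tophash  [8]uint8 // 8 bytes of hash prefixes
//	keys     [8]int64 // 64 bytes, naturally aligned
//	values   [8]int8  // 8 bytes, no padding required
//	overflow *bmap    // trailing pointer to the next chained bucket
//
// Interleaving key/value pairs would instead cost 7 padding bytes after
// every int8 value.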

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    [2]*[]*bmap    // keeps overflow buckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}
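
// For example, a bucket whose contents were sent to the first half of
// the larger table has tophash[0] == evacuatedX (2), so
//
//	evacuated(b) // true: 2 > empty (0) && 2 < minTopHash (4)
//
// while a normal filled bucket has tophash[0] >= minTopHash and reports
// false.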

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
}

func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.overflow[0] = append(*h.overflow[0], ovf)
	}
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
}

func (h *hmap) createOverflow() {
	if h.overflow == nil {
		h.overflow = new([2]*[]*bmap)
	}
	if h.overflow[0] == nil {
		h.overflow[0] = new([]*bmap)
	}
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on.  We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// find the size parameter B which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}
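
	// A hedged walk-through of the loop above for make(map[k]v, 100):
	// 100 > bucketCnt, and B stops at the first value satisfying
	// loadFactor*2^B >= 100, i.e.
	//
	//	B == 4 // 6.5 * 16 = 104 >= 100, so 2^4 = 16 buckets are allocated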

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign1)
	// If hint is large, zeroing this memory could take a while.
	buckets := bucket
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}
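
// In compiled code, a map literal such as
//
//	m := make(map[string]int, 100)
//
// is lowered to a makemap call with hint == 100; h and bucket are
// non-nil only when escape analysis lets the compiler place the header
// or first bucket on the stack, per the doc comment above.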

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
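
// In compiled code, a plain read such as
//
//	v := m[k]
//
// is lowered to mapaccess1, while the comma-ok form v, ok := m[k] uses
// mapaccess2 below; that is why a miss must return the zero object
// rather than nil.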

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}

// mapaccessK returns both key and value. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// already have a mapping for key.  Update it.
			typedmemmove(t.key, k2, key)
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			typedmemmove(t.elem, v2, val)
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key.  Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}
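
	// A hedged worked example of the trigger above: with B == 5, an
	// insert that finds the map already holding
	//
	//	int(loadFactor * (1 << 5)) // = 208 entries
	//
	// starts a grow, doubling the bucket array from 32 to 64.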

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	typedmemmove(t.key, insertk, key)
	typedmemmove(t.elem, insertv, val)
	*inserti = top
	h.count++
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}
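
// In compiled code,
//
//	delete(m, k)
//
// is lowered to a mapdelete call; deleting an absent key simply falls
// off the end of the search above and is a no-op.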

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow[0] = nil
	it.overflow[1] = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = *h.overflow
	}

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))
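
	// An illustrative decomposition (value assumed) for B == 4:
	//
	//	r := uintptr(0x1a7)
	//	startBucket := r & (1<<4 - 1) // 7: begin at bucket 7 of 16
	//	offset := uint8(r >> 4 & 7)   // 2: begin at slot 2 within each bucket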

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomicor8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet.  We're working on a bucket whose
				// oldbucket has not been evacuated yet.  Or at least, it wasn't
				// evacuated when we started the bucket.  So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs).  We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation.  We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it.  That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		throw("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	if h.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.overflow[1] != nil {
			throw("overflow is not nil")
		}
		h.overflow[1] = h.overflow[0]
		h.overflow[0] = nil
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
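
		// A hedged sketch of the split below: newbit is the old bucket
		// count, and each old bucket fans out into two new ones. With
		// 32 old buckets (newbit == 32) and oldbucket == 5:
		//
		//	hash&newbit == 0 // key stays in new bucket 5 (the X half)
		//	hash&newbit != 0 // key moves to new bucket 5+32 = 37 (the Y half)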
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !t.reflexivekey && !alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash.  Moreover,
						// it isn't reproducible.  Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way.  Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, x, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, y, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done.  Free old main bucket array.
			h.oldbuckets = nil
			// Can discard old overflow buckets as well.
			// If they are still referenced by an iterator,
			// then the iterator holds a pointer to the slice.
			if h.overflow != nil {
				h.overflow[1] = nil
			}
		}
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs.  Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0, nil, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}